github.com/myafeier/go-ethereum@v1.6.8-0.20170719123245-3e0dbe0eaa72/eth/downloader/downloader_test.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum/common"
    29  	"github.com/ethereum/go-ethereum/core"
    30  	"github.com/ethereum/go-ethereum/core/state"
    31  	"github.com/ethereum/go-ethereum/core/types"
    32  	"github.com/ethereum/go-ethereum/crypto"
    33  	"github.com/ethereum/go-ethereum/ethdb"
    34  	"github.com/ethereum/go-ethereum/event"
    35  	"github.com/ethereum/go-ethereum/params"
    36  	"github.com/ethereum/go-ethereum/trie"
    37  )
    38  
    39  var (
    40  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    41  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    42  )
    43  
    44  // Reduce some of the parameters to make the tester faster.
    45  func init() {
    46  	MaxForkAncestry = uint64(10000)
    47  	blockCacheLimit = 1024
    48  	fsCriticalTrials = 10
    49  }
    50  
    51  // downloadTester is a test simulator for mocking out the local block chain.
    52  type downloadTester struct {
    53  	downloader *Downloader
    54  
    55  	genesis *types.Block   // Genesis block used by the tester and peers
    56  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    57  	peerDb  ethdb.Database // Database of the peers containing all data
    58  
    59  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    60  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    61  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    62  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    63  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    64  
    65  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    66  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    67  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    68  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    69  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    70  
    71  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    72  
    73  	lock sync.RWMutex
    74  }
    75  
    76  // newTester creates a new downloader test mocker.
    77  func newTester() *downloadTester {
    78  	testdb, _ := ethdb.NewMemDatabase()
    79  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    80  
    81  	tester := &downloadTester{
    82  		genesis:           genesis,
    83  		peerDb:            testdb,
    84  		ownHashes:         []common.Hash{genesis.Hash()},
    85  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    86  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    87  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    88  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    89  		peerHashes:        make(map[string][]common.Hash),
    90  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    91  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    92  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    93  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    94  		peerMissingStates: make(map[string]map[common.Hash]bool),
    95  	}
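        	// Seed the state database with the genesis root so HasBlockAndState and
        	// CurrentBlock treat the genesis as fully synced: the tester fakes state
        	// presence with a one-byte marker per root instead of real trie nodes.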
    96  	tester.stateDb, _ = ethdb.NewMemDatabase()
    97  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    98  
    99  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   100  
   101  	return tester
   102  }
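
        // A typical test wires these pieces together roughly as follows (a sketch
        // only, mirroring testCanonicalSynchronisation further down; it assumes a
        // *testing.T in scope, and the chain length and peer id are arbitrary):
        //
        //	tester := newTester()
        //	defer tester.terminate()
        //
        //	hashes, headers, blocks, receipts := tester.makeChain(10, 0, tester.genesis, nil, false)
        //	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)
        //	if err := tester.sync("peer", nil, FullSync); err != nil {
        //		t.Fatalf("failed to synchronise blocks: %v", err)
        //	}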
   103  
   104  // makeChain creates a chain of n blocks starting at and including parent.
   105  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   106  // contains a transaction and every 5th an uncle to allow testing correct block
   107  // reassembly.
   108  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   109  	// Generate the block chain
   110  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, dl.peerDb, n, func(i int, block *core.BlockGen) {
   111  		block.SetCoinbase(common.Address{seed})
   112  
   113  		// If a heavy chain is requested, delay blocks to raise difficulty
   114  		if heavy {
   115  			block.OffsetTime(-1)
   116  		}
   117  	// If the block number is a multiple of 3, send a bonus transaction to the miner
   118  		if parent == dl.genesis && i%3 == 0 {
   119  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   120  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), new(big.Int).SetUint64(params.TxGas), nil, nil), signer, testKey)
   121  			if err != nil {
   122  				panic(err)
   123  			}
   124  			block.AddTx(tx)
   125  		}
   126  		// If the block number is a multiple of 5, add a bonus uncle to the block
   127  		if i > 0 && i%5 == 0 {
   128  			block.AddUncle(&types.Header{
   129  				ParentHash: block.PrevBlock(i - 1).Hash(),
   130  				Number:     big.NewInt(block.Number().Int64() - 1),
   131  			})
   132  		}
   133  	})
   134  	// Convert the block-chain into a hash-chain and header/block maps
   135  	hashes := make([]common.Hash, n+1)
   136  	hashes[len(hashes)-1] = parent.Hash()
   137  
   138  	headerm := make(map[common.Hash]*types.Header, n+1)
   139  	headerm[parent.Hash()] = parent.Header()
   140  
   141  	blockm := make(map[common.Hash]*types.Block, n+1)
   142  	blockm[parent.Hash()] = parent
   143  
   144  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   145  	receiptm[parent.Hash()] = parentReceipts
   146  
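        	// GenerateChain returns blocks ordered parent->head while the hash chain
        	// is head->parent, hence the mirrored index below: block i lands at slot
        	// len(hashes)-i-2, with slot len(hashes)-1 reserved for the parent.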
   147  	for i, b := range blocks {
   148  		hashes[len(hashes)-i-2] = b.Hash()
   149  		headerm[b.Hash()] = b.Header()
   150  		blockm[b.Hash()] = b
   151  		receiptm[b.Hash()] = receipts[i]
   152  	}
   153  	return hashes, headerm, blockm, receiptm
   154  }
   155  
   156  // makeChainFork creates two chains of length n, such that h1[:f] and
   157  // h2[:f] are different but have a common suffix of length n-f.
   158  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   159  	// Create the common suffix
   160  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   161  
   162  	// Create the forks, making the second heavier if unbalanced forks were requested
   163  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   164  	hashes1 = append(hashes1, hashes[1:]...)
   165  
   166  	heavy := !balanced
   170  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   171  	hashes2 = append(hashes2, hashes[1:]...)
   172  
   173  	for hash, header := range headers {
   174  		headers1[hash] = header
   175  		headers2[hash] = header
   176  	}
   177  	for hash, block := range blocks {
   178  		blocks1[hash] = block
   179  		blocks2[hash] = block
   180  	}
   181  	for hash, receipt := range receipts {
   182  		receipts1[hash] = receipt
   183  		receipts2[hash] = receipt
   184  	}
   185  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   186  }
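
        // Schematically, both returned chains share the n-f block suffix built on
        // top of the parent and diverge for the final f blocks, the second fork
        // optionally heavier (arrows point from child to parent):
        //
        //	parent <- common[0] ... common[n-f-1] <-+- fork1[0] ... fork1[f-1]
        //	                                        +- fork2[0] ... fork2[f-1]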
   187  
   188  // terminate aborts any operations on the embedded downloader and releases all
   189  // held resources.
   190  func (dl *downloadTester) terminate() {
   191  	dl.downloader.Terminate()
   192  }
   193  
   194  // sync starts synchronizing with a remote peer, blocking until it completes.
   195  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   196  	dl.lock.RLock()
   197  	hash := dl.peerHashes[id][0]
   198  	// If no particular TD was requested, load from the peer's blockchain
   199  	if td == nil {
   200  		td = big.NewInt(1)
   201  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   202  			td = diff
   203  		}
   204  	}
   205  	dl.lock.RUnlock()
   206  
   207  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   208  	err := dl.downloader.synchronise(id, hash, td, mode)
   209  	select {
   210  	case <-dl.downloader.cancelCh:
   211  		// Ok, downloader fully cancelled after sync cycle
   212  	default:
   213  		// Downloader is still accepting packets, can block a peer up
   214  		panic("downloader active post sync cycle") // panic will be caught by tester
   215  	}
   216  	return err
   217  }
   218  
   219  // HasHeader checks if a header is present in the tester's canonical chain.
   220  func (dl *downloadTester) HasHeader(hash common.Hash) bool {
   221  	return dl.GetHeaderByHash(hash) != nil
   222  }
   223  
   224  // HasBlockAndState checks if a block and its associated state are present in the tester's canonical chain.
   225  func (dl *downloadTester) HasBlockAndState(hash common.Hash) bool {
   226  	block := dl.GetBlockByHash(hash)
   227  	if block == nil {
   228  		return false
   229  	}
   230  	_, err := dl.stateDb.Get(block.Root().Bytes())
   231  	return err == nil
   232  }
   233  
   234  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   235  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   236  	dl.lock.RLock()
   237  	defer dl.lock.RUnlock()
   238  
   239  	return dl.ownHeaders[hash]
   240  }
   241  
   242  // GetBlockByHash retrieves a block from the tester's canonical chain.
   243  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   244  	dl.lock.RLock()
   245  	defer dl.lock.RUnlock()
   246  
   247  	return dl.ownBlocks[hash]
   248  }
   249  
   250  // CurrentHeader retrieves the current head header from the canonical chain.
   251  func (dl *downloadTester) CurrentHeader() *types.Header {
   252  	dl.lock.RLock()
   253  	defer dl.lock.RUnlock()
   254  
   255  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   256  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   257  			return header
   258  		}
   259  	}
   260  	return dl.genesis.Header()
   261  }
   262  
   263  // CurrentBlock retrieves the current head block from the canonical chain.
   264  func (dl *downloadTester) CurrentBlock() *types.Block {
   265  	dl.lock.RLock()
   266  	defer dl.lock.RUnlock()
   267  
   268  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   269  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   270  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   271  				return block
   272  			}
   273  		}
   274  	}
   275  	return dl.genesis
   276  }
   277  
   278  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   279  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   280  	dl.lock.RLock()
   281  	defer dl.lock.RUnlock()
   282  
   283  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   284  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   285  			return block
   286  		}
   287  	}
   288  	return dl.genesis
   289  }
   290  
   291  // FastSyncCommitHead manually sets the head block to a given hash.
   292  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   293  	// For now only check that the state trie is correct
   294  	if block := dl.GetBlockByHash(hash); block != nil {
   295  		_, err := trie.NewSecure(block.Root(), dl.stateDb, 0)
   296  		return err
   297  	}
   298  	return fmt.Errorf("non-existent block: %x", hash[:4])
   299  }
   300  
   301  // GetTdByHash retrieves the block's total difficulty from the canonical chain.
   302  func (dl *downloadTester) GetTdByHash(hash common.Hash) *big.Int {
   303  	dl.lock.RLock()
   304  	defer dl.lock.RUnlock()
   305  
   306  	return dl.ownChainTd[hash]
   307  }
   308  
   309  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   310  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   311  	dl.lock.Lock()
   312  	defer dl.lock.Unlock()
   313  
   314  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   315  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   316  		return 0, errors.New("unknown parent")
   317  	}
   318  	for i := 1; i < len(headers); i++ {
   319  		if headers[i].ParentHash != headers[i-1].Hash() {
   320  			return i, errors.New("unknown parent")
   321  		}
   322  	}
   323  	// Do a full insert if pre-checks passed
   324  	for i, header := range headers {
   325  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   326  			continue
   327  		}
   328  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   329  			return i, errors.New("unknown parent")
   330  		}
   331  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   332  		dl.ownHeaders[header.Hash()] = header
   333  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   334  	}
   335  	return len(headers), nil
   336  }
   337  
   338  // InsertChain injects a new batch of blocks into the simulated chain.
   339  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   340  	dl.lock.Lock()
   341  	defer dl.lock.Unlock()
   342  
   343  	for i, block := range blocks {
   344  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   345  			return i, errors.New("unknown parent")
   346  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   347  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   348  		}
   349  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   350  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   351  			dl.ownHeaders[block.Hash()] = block.Header()
   352  		}
   353  		dl.ownBlocks[block.Hash()] = block
   354  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   355  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   356  	}
   357  	return len(blocks), nil
   358  }
   359  
   360  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   361  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   362  	dl.lock.Lock()
   363  	defer dl.lock.Unlock()
   364  
   365  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   366  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   367  			return i, errors.New("unknown owner")
   368  		}
   369  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   370  			return i, errors.New("unknown parent")
   371  		}
   372  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   373  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   374  	}
   375  	return len(blocks), nil
   376  }
   377  
   378  // Rollback removes some recently added elements from the chain.
   379  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   380  	dl.lock.Lock()
   381  	defer dl.lock.Unlock()
   382  
   383  	for i := len(hashes) - 1; i >= 0; i-- {
   384  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   385  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   386  		}
   387  		delete(dl.ownChainTd, hashes[i])
   388  		delete(dl.ownHeaders, hashes[i])
   389  		delete(dl.ownReceipts, hashes[i])
   390  		delete(dl.ownBlocks, hashes[i])
   391  	}
   392  }
   393  
   394  // newPeer registers a new block download source into the downloader.
   395  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   396  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   397  }
   398  
   399  // newSlowPeer registers a new block download source into the downloader, with a
   400  // specific delay time on processing the network packets sent to it, simulating
   401  // potentially slow network IO.
   402  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   403  	dl.lock.Lock()
   404  	defer dl.lock.Unlock()
   405  
   406  	err := dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
   408  	if err == nil {
   409  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   410  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   411  		copy(dl.peerHashes[id], hashes)
   412  
   413  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   414  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   415  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   416  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   417  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   418  
   419  		genesis := hashes[len(hashes)-1]
   420  		if header := headers[genesis]; header != nil {
   421  			dl.peerHeaders[id][genesis] = header
   422  			dl.peerChainTds[id][genesis] = header.Difficulty
   423  		}
   424  		if block := blocks[genesis]; block != nil {
   425  			dl.peerBlocks[id][genesis] = block
   426  			dl.peerChainTds[id][genesis] = block.Difficulty()
   427  		}
   428  
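        		// Walk from the block above the genesis towards the head, accumulating
        		// total difficulties; a TD entry is only added while the parent's TD is
        		// known, so a gapped chain simply stops accruing difficulty at the gap.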
   429  		for i := len(hashes) - 2; i >= 0; i-- {
   430  			hash := hashes[i]
   431  
   432  			if header, ok := headers[hash]; ok {
   433  				dl.peerHeaders[id][hash] = header
   434  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   435  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   436  				}
   437  			}
   438  			if block, ok := blocks[hash]; ok {
   439  				dl.peerBlocks[id][hash] = block
   440  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   441  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   442  				}
   443  			}
   444  			if receipt, ok := receipts[hash]; ok {
   445  				dl.peerReceipts[id][hash] = receipt
   446  			}
   447  		}
   448  	}
   449  	return err
   450  }
   451  
   452  // dropPeer simulates a hard peer removal from the connection pool.
   453  func (dl *downloadTester) dropPeer(id string) {
   454  	dl.lock.Lock()
   455  	defer dl.lock.Unlock()
   456  
   457  	delete(dl.peerHashes, id)
   458  	delete(dl.peerHeaders, id)
   459  	delete(dl.peerBlocks, id)
   460  	delete(dl.peerChainTds, id)
   461  
   462  	dl.downloader.UnregisterPeer(id)
   463  }
   464  
   465  type downloadTesterPeer struct {
   466  	dl    *downloadTester
   467  	id    string
   468  	delay time.Duration
   469  }
   470  
   471  // Head retrieves a peer's current head hash and total difficulty.
   473  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   474  	dlp.dl.lock.RLock()
   475  	defer dlp.dl.lock.RUnlock()
   476  
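        	// The per-peer TD is resolved separately by the tester's sync helper, so
        	// a nil difficulty is sufficient here.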
   477  	return dlp.dl.peerHashes[dlp.id][0], nil
   478  }
   479  
   480  // RequestHeadersByHash retrieves a batch of headers based on a hashed origin
   481  // from a particular peer in the download tester, mapping the hash to its
   482  // canonical number and forwarding the request to RequestHeadersByNumber.
   483  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   484  	// Find the canonical number of the hash
   485  	dlp.dl.lock.RLock()
   486  	number := uint64(0)
   487  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   488  		if hash == origin {
   489  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   490  			break
   491  		}
   492  	}
   493  	dlp.dl.lock.RUnlock()
   494  
   495  	// Use the absolute header fetcher to satisfy the query
   496  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   497  }
   498  
   499  // RequestHeadersByNumber retrieves a batch of headers based on a numbered
   500  // origin from a particular peer in the download tester, delivering the
   501  // results to the downloader asynchronously.
   502  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   503  	time.Sleep(dlp.delay)
   504  
   505  	dlp.dl.lock.RLock()
   506  	defer dlp.dl.lock.RUnlock()
   507  
   508  	// Gather the next batch of headers
   509  	hashes := dlp.dl.peerHashes[dlp.id]
   510  	headers := dlp.dl.peerHeaders[dlp.id]
   511  	result := make([]*types.Header, 0, amount)
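        	// The hash chain is stored head-first, so the header numbered
        	// origin+i*(skip+1) lives at index len(hashes)-1-(origin+i*(skip+1));
        	// iteration stops once that index would underflow past the genesis.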
   512  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   513  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   514  			result = append(result, header)
   515  		}
   516  	}
   517  	// Delay delivery a bit to allow attacks to unfold
   518  	go func() {
   519  		time.Sleep(time.Millisecond)
   520  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   521  	}()
   522  	return nil
   523  }
   524  
   525  // RequestBodies retrieves a batch of block bodies from a particular peer in
   526  // the download tester, delivering the results to the downloader asynchronously.
   528  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   529  	time.Sleep(dlp.delay)
   530  
   531  	dlp.dl.lock.RLock()
   532  	defer dlp.dl.lock.RUnlock()
   533  
   534  	blocks := dlp.dl.peerBlocks[dlp.id]
   535  
   536  	transactions := make([][]*types.Transaction, 0, len(hashes))
   537  	uncles := make([][]*types.Header, 0, len(hashes))
   538  
   539  	for _, hash := range hashes {
   540  		if block, ok := blocks[hash]; ok {
   541  			transactions = append(transactions, block.Transactions())
   542  			uncles = append(uncles, block.Uncles())
   543  		}
   544  	}
   545  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   546  
   547  	return nil
   548  }
   549  
   550  // RequestReceipts retrieves a batch of block receipts from a particular peer
   551  // in the download tester, delivering the results to the downloader asynchronously.
   553  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   554  	time.Sleep(dlp.delay)
   555  
   556  	dlp.dl.lock.RLock()
   557  	defer dlp.dl.lock.RUnlock()
   558  
   559  	receipts := dlp.dl.peerReceipts[dlp.id]
   560  
   561  	results := make([][]*types.Receipt, 0, len(hashes))
   562  	for _, hash := range hashes {
   563  		if receipt, ok := receipts[hash]; ok {
   564  			results = append(results, receipt)
   565  		}
   566  	}
   567  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   568  
   569  	return nil
   570  }
   571  
   572  // RequestNodeData retrieves a batch of state trie data from a particular peer
   573  // in the download tester, delivering the results to the downloader asynchronously.
   575  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   576  	time.Sleep(dlp.delay)
   577  
   578  	dlp.dl.lock.RLock()
   579  	defer dlp.dl.lock.RUnlock()
   580  
   581  	results := make([][]byte, 0, len(hashes))
   582  	for _, hash := range hashes {
   583  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   584  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   585  				results = append(results, data)
   586  			}
   587  		}
   588  	}
   589  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   590  
   591  	return nil
   592  }
   593  
   594  // assertOwnChain checks if the local chain contains the correct number of items
   595  // of the various chain components.
   596  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   597  	assertOwnForkedChain(t, tester, 1, []int{length})
   598  }
   599  
   600  // assertOwnForkedChain checks if the local forked chain contains the correct
   601  // number of items of the various chain components.
   602  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   603  	// Initialize the counters for the first fork
   604  	headers, blocks := lengths[0], lengths[0]
   605  
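        	// Receipts are only downloaded up to the fast sync pivot, which floats
        	// within fsPivotInterval blocks of head-fsMinFullBlocks, so the expected
        	// receipt count is a [min, max] window rather than an exact figure.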
   606  	minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks
   607  	if minReceipts < 0 {
   608  		minReceipts = 1
   609  	}
   610  	if maxReceipts < 0 {
   611  		maxReceipts = 1
   612  	}
   613  	// Update the counters for each subsequent fork
   614  	for _, length := range lengths[1:] {
   615  		headers += length - common
   616  		blocks += length - common
   617  
   618  		minReceipts += length - common - fsMinFullBlocks - fsPivotInterval
   619  		maxReceipts += length - common - fsMinFullBlocks
   620  	}
   621  	switch tester.downloader.mode {
   622  	case FullSync:
   623  		minReceipts, maxReceipts = 1, 1
   624  	case LightSync:
   625  		blocks, minReceipts, maxReceipts = 1, 1, 1
   626  	}
   627  	if hs := len(tester.ownHeaders); hs != headers {
   628  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   629  	}
   630  	if bs := len(tester.ownBlocks); bs != blocks {
   631  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   632  	}
   633  	if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts {
   634  		t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts)
   635  	}
   636  	// Verify the state trie too for fast syncs
   637  	if tester.downloader.mode == FastSync {
   638  		var index int
   639  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   640  			index = pivot
   641  		} else {
   642  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   643  		}
   644  		if index > 0 {
   645  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(tester.stateDb)); statedb == nil || err != nil {
   646  				t.Fatalf("state reconstruction failed: %v", err)
   647  			}
   648  		}
   649  	}
   650  }
   651  
   652  // Tests that simple synchronization against a canonical chain works correctly.
   653  // In this test common ancestor lookup should be short circuited and not require
   654  // binary searching.
   655  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   656  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   657  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   658  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   659  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   660  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   661  
   662  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   663  	t.Parallel()
   664  
   665  	tester := newTester()
   666  	defer tester.terminate()
   667  
   668  	// Create a small enough block chain to download
   669  	targetBlocks := blockCacheLimit - 15
   670  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   671  
   672  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   673  
   674  	// Synchronise with the peer and make sure all relevant data was retrieved
   675  	if err := tester.sync("peer", nil, mode); err != nil {
   676  		t.Fatalf("failed to synchronise blocks: %v", err)
   677  	}
   678  	assertOwnChain(t, tester, targetBlocks+1)
   679  }
   680  
   681  // Tests that if a large batch of blocks is being downloaded, it is throttled
   682  // until the cached blocks are retrieved.
   683  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   684  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   685  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   686  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   687  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   688  
   689  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   690  	tester := newTester()
   691  	defer tester.terminate()
   692  
   693  	// Create a long block chain to download
   694  	targetBlocks := 8 * blockCacheLimit
   695  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   696  
   697  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   698  
   699  	// Wrap the importer to allow stepping
   700  	blocked, proceed := uint32(0), make(chan struct{})
   701  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   702  		atomic.StoreUint32(&blocked, uint32(len(results)))
   703  		<-proceed
   704  	}
   705  	// Start a synchronisation concurrently
   706  	errc := make(chan error)
   707  	go func() {
   708  		errc <- tester.sync("peer", nil, mode)
   709  	}()
   710  	// Iteratively take some blocks, always checking the retrieval count
   711  	for {
   712  		// Check the retrieval count synchronously (the reason for this ugly block)
   713  		tester.lock.RLock()
   714  		retrieved := len(tester.ownBlocks)
   715  		tester.lock.RUnlock()
   716  		if retrieved >= targetBlocks+1 {
   717  			break
   718  		}
   719  		// Wait a bit for sync to throttle itself
   720  		var cached, frozen int
   721  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   722  			time.Sleep(25 * time.Millisecond)
   723  
   724  			tester.lock.Lock()
   725  			tester.downloader.queue.lock.Lock()
   726  			cached = len(tester.downloader.queue.blockDonePool)
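        			// Receipts are only fetched up to the fast sync pivot, so when the
        			// receipt pool lags the block pool and the lagging entry is still
        			// below the pivot, the receipt count is the real completion figure.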
   727  			if mode == FastSync {
   728  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   729  					if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   730  						cached = receipts
   731  					}
   732  				}
   733  			}
   734  			frozen = int(atomic.LoadUint32(&blocked))
   735  			retrieved = len(tester.ownBlocks)
   736  			tester.downloader.queue.lock.Unlock()
   737  			tester.lock.Unlock()
   738  
   739  			if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 {
   740  				break
   741  			}
   742  		}
   743  		// Make sure we filled up the cache, then exhaust it
   744  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   745  
   746  		tester.lock.RLock()
   747  		retrieved = len(tester.ownBlocks)
   748  		tester.lock.RUnlock()
   749  		if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 {
   750  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1)
   751  		}
   752  		// Permit the blocked blocks to import
   753  		if atomic.LoadUint32(&blocked) > 0 {
   754  			atomic.StoreUint32(&blocked, uint32(0))
   755  			proceed <- struct{}{}
   756  		}
   757  	}
   758  	// Check that we haven't pulled more blocks than available
   759  	assertOwnChain(t, tester, targetBlocks+1)
   760  	if err := <-errc; err != nil {
   761  		t.Fatalf("block synchronization failed: %v", err)
   762  	}
   763  }
   764  
   765  // Tests that simple synchronization against a forked chain works correctly. In
   766  // this test common ancestor lookup should *not* be short circuited, and a full
   767  // binary search should be executed.
   768  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   769  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   770  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   771  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   772  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   773  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   774  
   775  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   776  	t.Parallel()
   777  
   778  	tester := newTester()
   779  	defer tester.terminate()
   780  
   781  	// Create a long enough forked chain
   782  	common, fork := MaxHashFetch, 2*MaxHashFetch
   783  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   784  
   785  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   786  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   787  
   788  	// Synchronise with the peer and make sure all blocks were retrieved
   789  	if err := tester.sync("fork A", nil, mode); err != nil {
   790  		t.Fatalf("failed to synchronise blocks: %v", err)
   791  	}
   792  	assertOwnChain(t, tester, common+fork+1)
   793  
   794  	// Synchronise with the second peer and make sure that fork is pulled too
   795  	if err := tester.sync("fork B", nil, mode); err != nil {
   796  		t.Fatalf("failed to synchronise blocks: %v", err)
   797  	}
   798  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   799  }
   800  
   801  // Tests that synchronising against a much shorter but much heavier fork works
   802  // correctly and is not dropped.
   803  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   804  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   805  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   806  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   807  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   808  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   809  
   810  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   811  	t.Parallel()
   812  
   813  	tester := newTester()
   814  	defer tester.terminate()
   815  
   816  	// Create a long enough forked chain
   817  	common, fork := MaxHashFetch, 4*MaxHashFetch
   818  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   819  
   820  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   821  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   822  
   823  	// Synchronise with the peer and make sure all blocks were retrieved
   824  	if err := tester.sync("light", nil, mode); err != nil {
   825  		t.Fatalf("failed to synchronise blocks: %v", err)
   826  	}
   827  	assertOwnChain(t, tester, common+fork+1)
   828  
   829  	// Synchronise with the second peer and make sure that fork is pulled too
   830  	if err := tester.sync("heavy", nil, mode); err != nil {
   831  		t.Fatalf("failed to synchronise blocks: %v", err)
   832  	}
   833  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   834  }
   835  
   836  // Tests that chain forks are contained within a certain interval of the current
   837  // chain head, ensuring that malicious peers cannot waste resources by feeding
   838  // long dead chains.
   839  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   840  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   841  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   842  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   843  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   844  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   845  
   846  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   847  	t.Parallel()
   848  
   849  	tester := newTester()
   850  	defer tester.terminate()
   851  
   852  	// Create a long enough forked chain
   853  	common, fork := 13, int(MaxForkAncestry+17)
   854  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   855  
   856  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   857  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   858  
   859  	// Synchronise with the peer and make sure all blocks were retrieved
   860  	if err := tester.sync("original", nil, mode); err != nil {
   861  		t.Fatalf("failed to synchronise blocks: %v", err)
   862  	}
   863  	assertOwnChain(t, tester, common+fork+1)
   864  
   865  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   866  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   867  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   868  	}
   869  }
   870  
   871  // Tests that chain forks are contained within a certain interval of the current
   872  // chain head for short but heavy forks too. These are a bit special because they
   873  // take different ancestor lookup paths.
   874  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   875  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   876  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   877  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   878  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   879  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   880  
   881  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   882  	t.Parallel()
   883  
   884  	tester := newTester()
   885  	defer tester.terminate()
   886  
   887  	// Create a long enough forked chain
   888  	common, fork := 13, int(MaxForkAncestry+17)
   889  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   890  
   891  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   892  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   893  
   894  	// Synchronise with the peer and make sure all blocks were retrieved
   895  	if err := tester.sync("original", nil, mode); err != nil {
   896  		t.Fatalf("failed to synchronise blocks: %v", err)
   897  	}
   898  	assertOwnChain(t, tester, common+fork+1)
   899  
   900  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   901  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   902  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   903  	}
   904  }
   905  
   906  // Tests that an inactive downloader will not accept incoming block headers and
   907  // bodies.
   908  func TestInactiveDownloader62(t *testing.T) {
   909  	t.Parallel()
   910  
   911  	tester := newTester()
   912  	defer tester.terminate()
   913  
   914  	// Check that neither block headers nor bodies are accepted
   915  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   916  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   917  	}
   918  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   919  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   920  	}
   921  }
   922  
   923  // Tests that an inactive downloader will not accept incoming block headers,
   924  // bodies and receipts.
   925  func TestInactiveDownloader63(t *testing.T) {
   926  	t.Parallel()
   927  
   928  	tester := newTester()
   929  	defer tester.terminate()
   930  
   931  	// Check that block headers, bodies and receipts are all rejected
   932  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   933  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   934  	}
   935  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   936  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   937  	}
   938  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   939  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   940  	}
   941  }
   942  
   943  // Tests that a canceled download wipes all previously accumulated state.
   944  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   945  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   946  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   947  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   948  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   949  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   950  
   951  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   952  	t.Parallel()
   953  
   954  	tester := newTester()
   955  	defer tester.terminate()
   956  
   957  	// Create a small enough block chain to download
   958  	targetBlocks := blockCacheLimit - 15
   959  	if targetBlocks >= MaxHashFetch {
   960  		targetBlocks = MaxHashFetch - 15
   961  	}
   962  	if targetBlocks >= MaxHeaderFetch {
   963  		targetBlocks = MaxHeaderFetch - 15
   964  	}
   965  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   966  
   967  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   968  
   969  	// Make sure canceling works with a pristine downloader
   970  	tester.downloader.Cancel()
   971  	if !tester.downloader.queue.Idle() {
   972  		t.Errorf("download queue not idle")
   973  	}
   974  	// Synchronise with the peer, but cancel afterwards
   975  	if err := tester.sync("peer", nil, mode); err != nil {
   976  		t.Fatalf("failed to synchronise blocks: %v", err)
   977  	}
   978  	tester.downloader.Cancel()
   979  	if !tester.downloader.queue.Idle() {
   980  		t.Errorf("download queue not idle")
   981  	}
   982  }
   983  
   984  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   985  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   986  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   987  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   988  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   989  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   990  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   991  
   992  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   993  	t.Parallel()
   994  
   995  	tester := newTester()
   996  	defer tester.terminate()
   997  
   998  	// Create various peers with various parts of the chain
   999  	targetPeers := 8
  1000  	targetBlocks := targetPeers*blockCacheLimit - 15
  1001  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1002  
  1003  	for i := 0; i < targetPeers; i++ {
  1004  		id := fmt.Sprintf("peer #%d", i)
  1005  		tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
  1006  	}
  1007  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1008  		t.Fatalf("failed to synchronise blocks: %v", err)
  1009  	}
  1010  	assertOwnChain(t, tester, targetBlocks+1)
  1011  }
  1012  
  1013  // Tests that synchronisations behave well in multi-version protocol environments
  1014  // and do not wreak havoc on other nodes in the network.
  1015  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1016  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1017  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1018  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1019  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1020  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1021  
  1022  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1023  	t.Parallel()
  1024  
  1025  	tester := newTester()
  1026  	defer tester.terminate()
  1027  
  1028  	// Create a small enough block chain to download
  1029  	targetBlocks := blockCacheLimit - 15
  1030  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1031  
  1032  	// Create peers of every type
  1033  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1034  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1035  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1036  
  1037  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1038  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1039  		t.Fatalf("failed to synchronise blocks: %v", err)
  1040  	}
  1041  	assertOwnChain(t, tester, targetBlocks+1)
  1042  
  1043  	// Check that no peers have been dropped off
  1044  	for _, version := range []int{62, 63, 64} {
  1045  		peer := fmt.Sprintf("peer %d", version)
  1046  		if _, ok := tester.peerHashes[peer]; !ok {
  1047  			t.Errorf("%s dropped", peer)
  1048  		}
  1049  	}
  1050  }
  1051  
  1052  // Tests that if a block is empty (e.g. header only), no body request should be
  1053  // made, and instead the header should be assembled into a whole block by itself.
  1054  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1055  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1056  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1057  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1058  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1059  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1060  
  1061  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1062  	t.Parallel()
  1063  
  1064  	tester := newTester()
  1065  	defer tester.terminate()
  1066  
  1067  	// Create a block chain to download
  1068  	targetBlocks := 2*blockCacheLimit - 15
  1069  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1070  
  1071  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1072  
  1073  	// Instrument the downloader to signal body requests
  1074  	bodiesHave, receiptsHave := int32(0), int32(0)
  1075  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1076  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1077  	}
  1078  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1079  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1080  	}
  1081  	// Synchronise with the peer and make sure all blocks were retrieved
  1082  	if err := tester.sync("peer", nil, mode); err != nil {
  1083  		t.Fatalf("failed to synchronise blocks: %v", err)
  1084  	}
  1085  	assertOwnChain(t, tester, targetBlocks+1)
  1086  
  1087  	// Validate the number of block bodies that should have been requested
  1088  	bodiesNeeded, receiptsNeeded := 0, 0
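        	// Only blocks carrying transactions or uncles need a body fetch, and only
        	// non-empty receipt sets at or below the fast sync pivot need a receipt
        	// fetch; everything else is short circuited from the header alone.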
  1089  	for _, block := range blocks {
  1090  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1091  			bodiesNeeded++
  1092  		}
  1093  	}
  1094  	for hash, receipt := range receipts {
  1095  		if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot {
  1096  			receiptsNeeded++
  1097  		}
  1098  	}
  1099  	if int(bodiesHave) != bodiesNeeded {
  1100  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1101  	}
  1102  	if int(receiptsHave) != receiptsNeeded {
  1103  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1104  	}
  1105  }
  1106  
  1107  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1108  // stalling the downloader by feeding gapped header chains.
  1109  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1110  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1111  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1112  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1113  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1114  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1115  
  1116  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1117  	t.Parallel()
  1118  
  1119  	tester := newTester()
  1120  	defer tester.terminate()
  1121  
  1122  	// Create a small enough block chain to download
  1123  	targetBlocks := blockCacheLimit - 15
  1124  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1125  
  1126  	// Attempt a full sync with an attacker feeding gapped headers
  1127  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1128  	missing := targetBlocks / 2
  1129  	delete(tester.peerHeaders["attack"], hashes[missing])
  1130  
  1131  	if err := tester.sync("attack", nil, mode); err == nil {
  1132  		t.Fatalf("succeeded attacker synchronisation")
  1133  	}
  1134  	// Synchronise with the valid peer and make sure sync succeeds
  1135  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1136  	if err := tester.sync("valid", nil, mode); err != nil {
  1137  		t.Fatalf("failed to synchronise blocks: %v", err)
  1138  	}
  1139  	assertOwnChain(t, tester, targetBlocks+1)
  1140  }
  1141  
  1142  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1143  // detects the invalid numbering.
  1144  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1145  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1146  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1147  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1148  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1149  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1150  
  1151  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1152  	tester := newTester()
  1153  	defer tester.terminate()
  1154  
  1155  	// Create a small enough block chain to download
  1156  	targetBlocks := blockCacheLimit - 15
  1157  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1158  
  1159  	// Attempt a full sync with an attacker feeding shifted headers
  1160  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1161  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1162  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1163  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1164  
  1165  	if err := tester.sync("attack", nil, mode); err == nil {
  1166  		t.Fatalf("succeeded attacker synchronisation")
  1167  	}
  1168  	// Synchronise with the valid peer and make sure sync succeeds
  1169  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1170  	if err := tester.sync("valid", nil, mode); err != nil {
  1171  		t.Fatalf("failed to synchronise blocks: %v", err)
  1172  	}
  1173  	assertOwnChain(t, tester, targetBlocks+1)
  1174  }
  1175  
  1176  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1177  // for various failure scenarios. Afterwards a full sync is attempted to make
  1178  // sure no state was corrupted.
  1179  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1180  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1181  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1182  
  1183  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1184  	tester := newTester()
  1185  	defer tester.terminate()
  1186  
  1187  	// Create a small enough block chain to download
  1188  	targetBlocks := 3*fsHeaderSafetyNet + fsPivotInterval + fsMinFullBlocks
  1189  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1190  
  1191  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1192  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1193  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1194  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1195  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1196  
  1197  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1198  		t.Fatalf("succeeded fast attacker synchronisation")
  1199  	}
  1200  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1201  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1202  	}
  1203  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1204  // This should result in both the last fsHeaderSafetyNet headers being
  1205  	// rolled back, and also the pivot point being reverted to a non-block status.
  1206  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1207  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1208  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1209  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1210  
  1211  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1212  		t.Fatalf("succeeded block attacker synchronisation")
  1213  	}
  1214  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1215  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1216  	}
  1217  	if mode == FastSync {
  1218  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1219  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1220  		}
  1221  	}
  1222  	// Attempt to sync with an attacker that withholds promised blocks after the
  1223  // fast sync pivot point. This could be an attempt to leave the node with a bad
  1224  	// but already imported pivot block.
  1225  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1226  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1227  
  1228  	tester.downloader.fsPivotFails = 0
  1229  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1230  		for i := missing; i <= len(hashes); i++ {
  1231  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1232  		}
  1233  		tester.downloader.syncInitHook = nil
  1234  	}
  1235  
  1236  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1237  		t.Fatalf("succeeded withholding attacker synchronisation")
  1238  	}
  1239  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1240  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1241  	}
  1242  	if mode == FastSync {
  1243  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1244  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1245  		}
  1246  	}
  1247  	tester.downloader.fsPivotFails = fsCriticalTrials
  1248  
  1249  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1250  	// rollback should also disable fast syncing for this process, verify that we
  1251  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1252  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1253  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1254  	if err := tester.sync("valid", nil, mode); err != nil {
  1255  		t.Fatalf("failed to synchronise blocks: %v", err)
  1256  	}
  1257  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1258  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1259  	}
  1260  	if mode != LightSync {
  1261  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1262  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1263  		}
  1264  	}
  1265  }
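
// During fast sync the downloader remembers the most recently committed
// headers so an invalid one can trigger an unwind. A hedged sketch of that
// bookkeeping (recordRollback is hypothetical; the real logic lives in the
// downloader's header processing loop):
func recordRollback(rollback []*types.Header, header *types.Header) []*types.Header {
	rollback = append(rollback, header)
	if len(rollback) > fsHeaderSafetyNet {
		// Only the last fsHeaderSafetyNet headers are ever unwound.
		rollback = rollback[len(rollback)-fsHeaderSafetyNet:]
	}
	return rollback
}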
  1266  
  1267  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1268  // afterwards by not sending any useful hashes.
  1269  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1270  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1271  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1272  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1273  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1274  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1275  
  1276  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1277  	t.Parallel()
  1278  
  1279  	tester := newTester()
  1280  	defer tester.terminate()
  1281  
  1282  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1283  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1284  
  1285  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1286  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1287  	}
  1288  }
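
// A starving peer advertises an enormous total difficulty but then has
// nothing beyond our own head to deliver. A hedged sketch of the kind of
// check that surfaces errStallingPeer (isStalling is hypothetical; the real
// detection is spread across the downloader's fetch loops):
func isStalling(advertisedTD, localTD *big.Int, delivered int) error {
	if advertisedTD.Cmp(localTD) > 0 && delivered == 0 {
		return errStallingPeer // promised more difficulty, delivered nothing
	}
	return nil
}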
  1289  
  1290  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1291  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1292  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1293  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1294  
  1295  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1296  	// Define the disconnection requirement for individual sync errors
  1297  	tests := []struct {
  1298  		result error
  1299  		drop   bool
  1300  	}{
  1301  		{nil, false},                        // Sync succeeded, all is well
  1302  		{errBusy, false},                    // Sync is already in progress, no problem
  1303  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1304  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1305  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1306  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1307  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1308  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1309  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1310  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1311  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1312  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1313  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1314  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1315  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1316  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1317  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1318  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1319  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1320  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1321  	}
  1322  	// Run the tests and check disconnection status
  1323  	tester := newTester()
  1324  	defer tester.terminate()
  1325  
  1326  	for i, tt := range tests {
  1327  		// Register a new peer and ensure its presence
  1328  		id := fmt.Sprintf("test %d", i)
  1329  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1330  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1331  		}
  1332  		if _, ok := tester.peerHashes[id]; !ok {
  1333  			t.Fatalf("test %d: registered peer not found", i)
  1334  		}
  1335  		// Simulate a synchronisation and check the required result
  1336  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1337  
  1338  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1339  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1340  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1341  		}
  1342  	}
  1343  }
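
// The table above encodes the drop policy: errors that implicate the sync
// origin cost it the connection, while soft races and cancellations do not.
// A hedged sketch of that policy as a single predicate (shouldDrop is
// hypothetical; the real decision is made inline in Synchronise):
func shouldDrop(err error) bool {
	switch err {
	case errBadPeer, errStallingPeer, errTimeout, errEmptyHeaderSet,
		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
		return true // the sync origin itself misbehaved
	default:
		return false // benign, racy or third-party failures
	}
}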
  1344  
  1345  // Tests that synchronisation progress (origin block number, current block number
  1346  // and highest block number) is tracked and updated correctly.
  1347  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1348  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1349  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1350  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1351  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1352  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1353  
  1354  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1355  	t.Parallel()
  1356  
  1357  	tester := newTester()
  1358  	defer tester.terminate()
  1359  
  1360  	// Create a small enough block chain to download
  1361  	targetBlocks := blockCacheLimit - 15
  1362  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1363  
  1364  	// Set a sync init hook to catch progress changes
  1365  	starting := make(chan struct{})
  1366  	progress := make(chan struct{})
  1367  
  1368  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1369  		starting <- struct{}{}
  1370  		<-progress
  1371  	}
  1372  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1373  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1374  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1375  	}
  1376  	// Synchronise half the blocks and check initial progress
  1377  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1378  	pending := new(sync.WaitGroup)
  1379  	pending.Add(1)
  1380  
  1381  	go func() {
  1382  		defer pending.Done()
  1383  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1384  			t.Fatalf("failed to synchronise blocks: %v", err)
  1385  		}
  1386  	}()
  1387  	<-starting
  1388  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1389  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1390  	}
  1391  	progress <- struct{}{}
  1392  	pending.Wait()
  1393  
  1394  	// Synchronise all the blocks and check continuation progress
  1395  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1396  	pending.Add(1)
  1397  
  1398  	go func() {
  1399  		defer pending.Done()
  1400  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1401  			t.Errorf("failed to synchronise blocks: %v", err) // Errorf: runs off the test goroutine
  1402  		}
  1403  	}()
  1404  	<-starting
  1405  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1406  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1407  	}
  1408  	progress <- struct{}{}
  1409  	pending.Wait()
  1410  
  1411  	// Check final progress after successful sync
  1412  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1413  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1414  	}
  1415  }
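
// All the progress tests share one rendezvous: syncInitHook parks the sync at
// its first checkpoint so the test can assert intermediate progress, then the
// test releases it. A hedged, generic sketch of the pattern (pauseHook is
// hypothetical):
func pauseHook(starting, progress chan struct{}) func(uint64, uint64) {
	return func(origin, latest uint64) {
		starting <- struct{}{} // tell the test the sync is about to begin
		<-progress             // block until the test finishes its assertions
	}
}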
  1416  
  1417  // Tests that synchronisation progress (origin block number and highest block
  1418  // number) is tracked and updated correctly in case of a fork (or manual head
  1419  // reversal).
  1420  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1421  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1422  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1423  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1424  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1425  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1426  
  1427  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1428  	t.Parallel()
  1429  
  1430  	tester := newTester()
  1431  	defer tester.terminate()
  1432  
  1433  	// Create a forked chain to simulate origin reversal
  1434  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1435  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1436  
  1437  	// Set a sync init hook to catch progress changes
  1438  	starting := make(chan struct{})
  1439  	progress := make(chan struct{})
  1440  
  1441  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1442  		starting <- struct{}{}
  1443  		<-progress
  1444  	}
  1445  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1446  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1447  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1448  	}
  1449  	// Synchronise with one of the forks and check progress
  1450  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1451  	pending := new(sync.WaitGroup)
  1452  	pending.Add(1)
  1453  
  1454  	go func() {
  1455  		defer pending.Done()
  1456  		if err := tester.sync("fork A", nil, mode); err != nil {
  1457  			t.Errorf("failed to synchronise blocks: %v", err) // Errorf: runs off the test goroutine
  1458  		}
  1459  	}()
  1460  	<-starting
  1461  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1462  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1463  	}
  1464  	progress <- struct{}{}
  1465  	pending.Wait()
  1466  
  1467  	// Simulate a successful sync above the fork
  1468  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1469  
  1470  	// Synchronise with the second fork and check progress resets
  1471  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1472  	pending.Add(1)
  1473  
  1474  	go func() {
  1475  		defer pending.Done()
  1476  		if err := tester.sync("fork B", nil, mode); err != nil {
  1477  			t.Errorf("failed to synchronise blocks: %v", err) // Errorf: runs off the test goroutine
  1478  		}
  1479  	}()
  1480  	<-starting
  1481  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1482  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1483  	}
  1484  	progress <- struct{}{}
  1485  	pending.Wait()
  1486  
  1487  	// Check final progress after successful sync
  1488  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1489  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1490  	}
  1491  }
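
// On a fork the next cycle's origin drops back to the common ancestor, even
// if a previous sync had already progressed past it. A hedged sketch of that
// rule (forkOrigin is hypothetical; the downloader tracks this through
// syncStatsChainOrigin):
func forkOrigin(previousOrigin, commonAncestor uint64) uint64 {
	if commonAncestor < previousOrigin {
		return commonAncestor // head got reverted: track from the fork point
	}
	return previousOrigin
}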
  1492  
  1493  // Tests that if synchronisation is aborted due to some failure, then the progress
  1494  // origin is not updated in the next sync cycle, as it should be considered the
  1495  // continuation of the previous sync and not a new instance.
  1496  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1497  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1498  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1499  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1500  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1501  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1502  
  1503  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1504  	t.Parallel()
  1505  
  1506  	tester := newTester()
  1507  	defer tester.terminate()
  1508  
  1509  	// Create a small enough block chain to download
  1510  	targetBlocks := blockCacheLimit - 15
  1511  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1512  
  1513  	// Set a sync init hook to catch progress changes
  1514  	starting := make(chan struct{})
  1515  	progress := make(chan struct{})
  1516  
  1517  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1518  		starting <- struct{}{}
  1519  		<-progress
  1520  	}
  1521  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1522  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1523  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1524  	}
  1525  	// Attempt a full sync with a faulty peer
  1526  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1527  	missing := targetBlocks / 2
  1528  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1529  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1530  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1531  
  1532  	pending := new(sync.WaitGroup)
  1533  	pending.Add(1)
  1534  
  1535  	go func() {
  1536  		defer pending.Done()
  1537  		if err := tester.sync("faulty", nil, mode); err == nil {
  1538  			t.Errorf("succeeded faulty synchronisation") // Errorf: runs off the test goroutine
  1539  		}
  1540  	}()
  1541  	<-starting
  1542  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1543  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1544  	}
  1545  	progress <- struct{}{}
  1546  	pending.Wait()
  1547  
  1548  	// Synchronise with a good peer and check that the progress origin remains the same after the failure
  1549  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1550  	pending.Add(1)
  1551  
  1552  	go func() {
  1553  		defer pending.Done()
  1554  		if err := tester.sync("valid", nil, mode); err != nil {
  1555  			t.Errorf("failed to synchronise blocks: %v", err) // Errorf: runs off the test goroutine
  1556  		}
  1557  	}()
  1558  	<-starting
  1559  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1560  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1561  	}
  1562  	progress <- struct{}{}
  1563  	pending.Wait()
  1564  
  1565  	// Check final progress after successful sync
  1566  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1567  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1568  	}
  1569  }
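
// In essence the test above checks one rule: a failed cycle leaves the
// recorded origin alone, so the next attempt counts as a continuation. A
// hedged sketch of that rule (nextOrigin is hypothetical; the downloader
// actually tracks this in syncStatsChainOrigin):
func nextOrigin(previousOrigin, currentHead uint64, previousFailed bool) uint64 {
	if previousFailed {
		return previousOrigin // same sync, new attempt: keep the old origin
	}
	return currentHead // fresh sync: the origin moves up to the current head
}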
  1570  
  1571  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1572  // the progress height is successfully reduced at the next sync invocation.
  1573  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1574  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1575  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1576  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1577  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1578  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1579  
  1580  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1581  	t.Parallel()
  1582  
  1583  	tester := newTester()
  1584  	defer tester.terminate()
  1585  
  1586  	// Create a small block chain
  1587  	targetBlocks := blockCacheLimit - 15
  1588  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1589  
  1590  	// Set a sync init hook to catch progress changes
  1591  	starting := make(chan struct{})
  1592  	progress := make(chan struct{})
  1593  
  1594  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1595  		starting <- struct{}{}
  1596  		<-progress
  1597  	}
  1598  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1599  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1600  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1601  	}
  1602  	// Create and sync with an attacker that promises a higher chain than available
  1603  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1604  	for i := 1; i < 3; i++ {
  1605  		delete(tester.peerHeaders["attack"], hashes[i])
  1606  		delete(tester.peerBlocks["attack"], hashes[i])
  1607  		delete(tester.peerReceipts["attack"], hashes[i])
  1608  	}
  1609  
  1610  	pending := new(sync.WaitGroup)
  1611  	pending.Add(1)
  1612  
  1613  	go func() {
  1614  		defer pending.Done()
  1615  		if err := tester.sync("attack", nil, mode); err == nil {
  1616  			t.Errorf("succeeded attacker synchronisation") // Errorf: runs off the test goroutine
  1617  		}
  1618  	}()
  1619  	<-starting
  1620  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1621  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1622  	}
  1623  	progress <- struct{}{}
  1624  	pending.Wait()
  1625  
  1626  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1627  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1628  	pending.Add(1)
  1629  
  1630  	go func() {
  1631  		defer pending.Done()
  1632  		if err := tester.sync("valid", nil, mode); err != nil {
  1633  			t.Errorf("failed to synchronise blocks: %v", err) // Errorf: runs off the test goroutine
  1634  		}
  1635  	}()
  1636  	<-starting
  1637  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1638  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1639  	}
  1640  	progress <- struct{}{}
  1641  	pending.Wait()
  1642  
  1643  	// Check final progress after successful sync
  1644  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1645  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1646  	}
  1647  }
  1648  
  1649  // This test reproduces an issue where unexpected deliveries would
  1650  // block indefinitely if they arrived at the right time.
  1651  func TestDeliverHeadersHang62(t *testing.T)      { testDeliverHeadersHang(t, 62, FullSync) }
  1652  func TestDeliverHeadersHang63Full(t *testing.T)  { testDeliverHeadersHang(t, 63, FullSync) }
  1653  func TestDeliverHeadersHang63Fast(t *testing.T)  { testDeliverHeadersHang(t, 63, FastSync) }
  1654  func TestDeliverHeadersHang64Full(t *testing.T)  { testDeliverHeadersHang(t, 64, FullSync) }
  1655  func TestDeliverHeadersHang64Fast(t *testing.T)  { testDeliverHeadersHang(t, 64, FastSync) }
  1656  func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
  1657  
  1658  type floodingTestPeer struct {
  1659  	peer   Peer
  1660  	tester *downloadTester
  1661  }
  1662  
  1663  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1664  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1665  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1666  }
  1667  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1668  	return ftp.peer.RequestBodies(hashes)
  1669  }
  1670  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1671  	return ftp.peer.RequestReceipts(hashes)
  1672  }
  1673  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1674  	return ftp.peer.RequestNodeData(hashes)
  1675  }
  1676  
  1677  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1678  	deliveriesDone := make(chan struct{}, 500)
  1679  	for i := 0; i < cap(deliveriesDone); i++ {
  1680  		peer := fmt.Sprintf("fake-peer%d", i)
  1681  		go func() {
  1682  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1683  			deliveriesDone <- struct{}{}
  1684  		}()
  1685  	}
  1686  	// Deliver the actual requested headers.
  1687  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1688  	// None of the extra deliveries should block.
  1689  	timeout := time.After(15 * time.Second)
  1690  	for i := 0; i < cap(deliveriesDone); i++ {
  1691  		select {
  1692  		case <-deliveriesDone:
  1693  		case <-timeout:
  1694  			panic("blocked")
  1695  		}
  1696  	}
  1697  	return nil
  1698  }
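
// For the flood above not to hang, unsolicited deliveries must be able to
// bail out instead of blocking on the downloader's channels forever. A hedged
// sketch of the shape such a delivery takes (deliveryCh and cancelCh are
// placeholders, not the downloader's actual fields):
func deliverNonBlocking(deliveryCh chan []*types.Header, cancelCh chan struct{}, headers []*types.Header) error {
	select {
	case deliveryCh <- headers:
		return nil // a running sync accepted the delivery
	case <-cancelCh:
		return errNoSyncActive // sync gone: drop the unsolicited delivery
	}
}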
  1699  
  1700  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1701  	t.Parallel()
  1702  
  1703  	master := newTester()
  1704  	defer master.terminate()
  1705  
  1706  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1707  	for i := 0; i < 200; i++ {
  1708  		tester := newTester()
  1709  		tester.peerDb = master.peerDb
  1710  
  1711  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1712  		// Whenever the downloader requests headers, flood it with
  1713  		// a lot of unrequested header deliveries.
  1714  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1715  			tester.downloader.peers.peers["peer"].peer,
  1716  			tester,
  1717  		}
  1718  
  1719  		if err := tester.sync("peer", nil, mode); err != nil {
  1720  			t.Errorf("sync failed: %v", err)
  1721  		}
  1722  		tester.terminate()
  1723  	}
  1724  }
  1725  
  1726  // Tests that if fast sync aborts in the critical section, it can restart a few
  1727  // times before giving up.
  1728  func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
  1729  func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
  1730  func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
  1731  func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
  1732  
  1733  func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
  1734  	tester := newTester()
  1735  	defer tester.terminate()
  1736  
  1737  	// Create a large enough blockchain to actually fast sync on
  1738  	targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
  1739  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1740  
  1741  	// Create a tester peer with a critical section header missing (force failures)
  1742  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1743  	delete(tester.peerHeaders["peer"], hashes[fsMinFullBlocks-1])
  1744  	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
  1745  
  1746  	// Remove all possible pivot state roots and slow down replies (the test resets these after the first failure)
  1747  	for i := 0; i < fsPivotInterval; i++ {
  1748  		tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
  1749  	}
  1750  	(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).delay = 500 * time.Millisecond // Enough to reach the critical section
  1751  
  1752  	// Synchronise with the peer a few times and make sure they fail until the retry limit
  1753  	for i := 0; i < int(fsCriticalTrials)-1; i++ {
  1754  		// Attempt a sync and ensure it fails properly
  1755  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1756  			t.Fatalf("failing fast sync succeeded")
  1757  		}
  1758  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1759  
  1760  		// If it's the first failure, the pivot should be locked => re-enable all others to detect pivot changes
  1761  		if i == 0 {
  1762  			if tester.downloader.fsPivotLock == nil {
  1763  				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
  1764  				t.Fatalf("pivot block not locked in after critical section failure")
  1765  			}
  1766  			tester.lock.Lock()
  1767  			tester.peerHeaders["peer"][hashes[fsMinFullBlocks-1]] = headers[hashes[fsMinFullBlocks-1]]
  1768  			tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
  1769  			(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).delay = 0
  1770  			tester.lock.Unlock()
  1771  		}
  1772  	}
  1773  	// Return all nodes if we're testing fast sync progression
  1774  	if progress {
  1775  		tester.lock.Lock()
  1776  		tester.peerMissingStates["peer"] = map[common.Hash]bool{}
  1777  		tester.lock.Unlock()
  1778  
  1779  		if err := tester.sync("peer", nil, FastSync); err != nil {
  1780  			t.Fatalf("failed to synchronise blocks in progressed fast sync: %v", err)
  1781  		}
  1782  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1783  
  1784  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != 1 {
  1785  			t.Fatalf("progressed pivot trial count mismatch: have %v, want %v", fails, 1)
  1786  		}
  1787  		assertOwnChain(t, tester, targetBlocks+1)
  1788  	} else {
  1789  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1790  			t.Fatalf("succeeded synchronising blocks in a fast sync expected to fail")
  1791  		}
  1792  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1793  
  1794  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != fsCriticalTrials {
  1795  			t.Fatalf("failed pivot trial count mismatch: have %v, want %v", fails, fsCriticalTrials)
  1796  		}
  1797  	}
  1798  	// Retry limit exhausted, downloader will switch to full sync, should succeed
  1799  	if err := tester.sync("peer", nil, FastSync); err != nil {
  1800  		t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
  1801  	}
  1802  	// Note, we can't assert the chain here because the test asserter assumes sync
  1803  	// completed using a single mode of operation, whereas fast-then-slow can result
  1804  	// in arbitrary intermediate state that's not cleanly verifiable.
  1805  }
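
// The restart tests exercise the downloader's escape hatch: every aborted
// critical section bumps fsPivotFails, and once fsCriticalTrials is reached
// the node gives up on fast sync and finishes with a full sync. A hedged
// sketch of that downgrade decision (chooseMode is hypothetical):
func chooseMode(requested SyncMode, pivotFails uint32) SyncMode {
	if requested == FastSync && pivotFails >= fsCriticalTrials {
		return FullSync // too many pivot failures: fall back to full sync
	}
	return requested
}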