github.com/Elemental-core/elementalcore@v0.0.0-20191206075037-63891242267a/eth/downloader/downloader_test.go

     1  // Copyright 2015 The elementalcore Authors
     2  // This file is part of the elementalcore library.
     3  //
     4  // The elementalcore library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The elementalcore library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the elementalcore library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/Elemental-core/elementalcore/common"
    29  	"github.com/Elemental-core/elementalcore/core"
    30  	"github.com/Elemental-core/elementalcore/core/state"
    31  	"github.com/Elemental-core/elementalcore/core/types"
    32  	"github.com/Elemental-core/elementalcore/crypto"
    33  	"github.com/Elemental-core/elementalcore/ethdb"
    34  	"github.com/Elemental-core/elementalcore/event"
    35  	"github.com/Elemental-core/elementalcore/params"
    36  	"github.com/Elemental-core/elementalcore/trie"
    37  )
    38  
    39  var (
    40  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    41  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    42  )
    43  
    44  // Reduce some of the parameters to make the tester faster.
    45  func init() {
    46  	MaxForkAncestry = uint64(10000)
    47  	blockCacheLimit = 1024
    48  	fsCriticalTrials = 10
    49  }
    50  
     51  // downloadTester is a test simulator for mocking out a local block chain.
    52  type downloadTester struct {
    53  	downloader *Downloader
    54  
     55  	genesis *types.Block   // Genesis block used by the tester and peers
    56  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    57  	peerDb  ethdb.Database // Database of the peers containing all data
    58  
    59  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    60  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    61  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    62  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    63  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    64  
    65  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    66  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    67  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    68  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    69  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    70  
    71  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    72  
    73  	lock sync.RWMutex
    74  }
    75  
    76  // newTester creates a new downloader test mocker.
    77  func newTester() *downloadTester {
    78  	testdb, _ := ethdb.NewMemDatabase()
    79  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    80  
    81  	tester := &downloadTester{
    82  		genesis:           genesis,
    83  		peerDb:            testdb,
    84  		ownHashes:         []common.Hash{genesis.Hash()},
    85  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    86  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    87  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    88  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    89  		peerHashes:        make(map[string][]common.Hash),
    90  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    91  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    92  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    93  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    94  		peerMissingStates: make(map[string]map[common.Hash]bool),
    95  	}
    96  	tester.stateDb, _ = ethdb.NewMemDatabase()
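         	// Mark the genesis state root as present: the tester treats a key stored
         	// under a block's root hash in stateDb as proof that the block's state is
         	// available (see HasBlockAndState and InsertChain below).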
    97  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    98  
    99  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   100  
   101  	return tester
   102  }
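
         // A typical test drives the mocker roughly as the cases further below do
         // (a usage sketch, not an additional test):
         //
         //	tester := newTester()
         //	defer tester.terminate()
         //	hashes, headers, blocks, receipts := tester.makeChain(n, 0, tester.genesis, nil, false)
         //	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)
         //	err := tester.sync("peer", nil, FullSync)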
   103  
   104  // makeChain creates a chain of n blocks starting at and including parent.
    105  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   106  // contains a transaction and every 5th an uncle to allow testing correct block
   107  // reassembly.
   108  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   109  	// Generate the block chain
   110  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, dl.peerDb, n, func(i int, block *core.BlockGen) {
   111  		block.SetCoinbase(common.Address{seed})
   112  
   113  		// If a heavy chain is requested, delay blocks to raise difficulty
   114  		if heavy {
   115  			block.OffsetTime(-1)
   116  		}
    117  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   118  		if parent == dl.genesis && i%3 == 0 {
   119  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   120  			tx, err := types.SignTx(types.NewTransaction(types.Binary, block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), new(big.Int).SetUint64(params.TxGas), nil, nil), signer, testKey)
   121  			if err != nil {
   122  				panic(err)
   123  			}
   124  			block.AddTx(tx)
   125  		}
   126  		// If the block number is a multiple of 5, add a bonus uncle to the block
   127  		if i > 0 && i%5 == 0 {
   128  			block.AddUncle(&types.Header{
   129  				ParentHash:  block.PrevBlock(i - 1).Hash(),
   130  				Number:      big.NewInt(block.Number().Int64() - 1),
   131  				DposContext: &types.DposContextProto{},
   132  			})
   133  		}
   134  	})
   135  	// Convert the block-chain into a hash-chain and header/block maps
   136  	hashes := make([]common.Hash, n+1)
   137  	hashes[len(hashes)-1] = parent.Hash()
   138  
   139  	headerm := make(map[common.Hash]*types.Header, n+1)
   140  	headerm[parent.Hash()] = parent.Header()
   141  
   142  	blockm := make(map[common.Hash]*types.Block, n+1)
   143  	blockm[parent.Hash()] = parent
   144  
   145  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   146  	receiptm[parent.Hash()] = parentReceipts
   147  
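         	// GenerateChain returns the new blocks oldest first, so fill the hash slice
         	// from the back (the parent already occupies the final slot) to obtain the
         	// head->parent ordering documented above.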
   148  	for i, b := range blocks {
   149  		hashes[len(hashes)-i-2] = b.Hash()
   150  		headerm[b.Hash()] = b.Header()
   151  		blockm[b.Hash()] = b
   152  		receiptm[b.Hash()] = receipts[i]
   153  	}
   154  	return hashes, headerm, blockm, receiptm
   155  }
   156  
   157  // makeChainFork creates two chains of length n, such that h1[:f] and
   158  // h2[:f] are different but have a common suffix of length n-f.
   159  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   160  	// Create the common suffix
   161  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   162  
    163  	// Create the forks, making the second heavier if non-balanced forks were requested
   164  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   165  	hashes1 = append(hashes1, hashes[1:]...)
   166  
   167  	heavy := false
   168  	if !balanced {
   169  		heavy = true
   170  	}
   171  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   172  	hashes2 = append(hashes2, hashes[1:]...)
   173  
   174  	for hash, header := range headers {
   175  		headers1[hash] = header
   176  		headers2[hash] = header
   177  	}
   178  	for hash, block := range blocks {
   179  		blocks1[hash] = block
   180  		blocks2[hash] = block
   181  	}
   182  	for hash, receipt := range receipts {
   183  		receipts1[hash] = receipt
   184  		receipts2[hash] = receipt
   185  	}
   186  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   187  }
   188  
   189  // terminate aborts any operations on the embedded downloader and releases all
   190  // held resources.
   191  func (dl *downloadTester) terminate() {
   192  	dl.downloader.Terminate()
   193  }
   194  
   195  // sync starts synchronizing with a remote peer, blocking until it completes.
   196  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   197  	dl.lock.RLock()
   198  	hash := dl.peerHashes[id][0]
   199  	// If no particular TD was requested, load from the peer's blockchain
   200  	if td == nil {
   201  		td = big.NewInt(1)
   202  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   203  			td = diff
   204  		}
   205  	}
   206  	dl.lock.RUnlock()
   207  
   208  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   209  	err := dl.downloader.synchronise(id, hash, td, mode)
   210  	select {
   211  	case <-dl.downloader.cancelCh:
   212  		// Ok, downloader fully cancelled after sync cycle
   213  	default:
   214  		// Downloader is still accepting packets, can block a peer up
   215  		panic("downloader active post sync cycle") // panic will be caught by tester
   216  	}
   217  	return err
   218  }
   219  
    220  // HasHeader checks if a header is present in the tester's canonical chain.
   221  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   222  	return dl.GetHeaderByHash(hash) != nil
   223  }
   224  
    225  // HasBlockAndState checks if a block and its associated state are present in the tester's canonical chain.
   226  func (dl *downloadTester) HasBlockAndState(hash common.Hash) bool {
   227  	block := dl.GetBlockByHash(hash)
   228  	if block == nil {
   229  		return false
   230  	}
   231  	_, err := dl.stateDb.Get(block.Root().Bytes())
   232  	return err == nil
   233  }
   234  
    235  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   236  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   237  	dl.lock.RLock()
   238  	defer dl.lock.RUnlock()
   239  
   240  	return dl.ownHeaders[hash]
   241  }
   242  
    243  // GetBlockByHash retrieves a block from the tester's canonical chain.
   244  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   245  	dl.lock.RLock()
   246  	defer dl.lock.RUnlock()
   247  
   248  	return dl.ownBlocks[hash]
   249  }
   250  
   251  // CurrentHeader retrieves the current head header from the canonical chain.
   252  func (dl *downloadTester) CurrentHeader() *types.Header {
   253  	dl.lock.RLock()
   254  	defer dl.lock.RUnlock()
   255  
   256  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   257  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   258  			return header
   259  		}
   260  	}
   261  	return dl.genesis.Header()
   262  }
   263  
   264  // CurrentBlock retrieves the current head block from the canonical chain.
   265  func (dl *downloadTester) CurrentBlock() *types.Block {
   266  	dl.lock.RLock()
   267  	defer dl.lock.RUnlock()
   268  
   269  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   270  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   271  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   272  				return block
   273  			}
   274  		}
   275  	}
   276  	return dl.genesis
   277  }
   278  
   279  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   280  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   281  	dl.lock.RLock()
   282  	defer dl.lock.RUnlock()
   283  
   284  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   285  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   286  			return block
   287  		}
   288  	}
   289  	return dl.genesis
   290  }
   291  
   292  // FastSyncCommitHead manually sets the head block to a given hash.
   293  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   294  	// For now only check that the state trie is correct
   295  	if block := dl.GetBlockByHash(hash); block != nil {
   296  		_, err := trie.NewSecure(block.Root(), dl.stateDb, 0)
   297  		return err
   298  	}
   299  	return fmt.Errorf("non existent block: %x", hash[:4])
   300  }
   301  
   302  // GetTdByHash retrieves the block's total difficulty from the canonical chain.
   303  func (dl *downloadTester) GetTdByHash(hash common.Hash) *big.Int {
   304  	dl.lock.RLock()
   305  	defer dl.lock.RUnlock()
   306  
   307  	return dl.ownChainTd[hash]
   308  }
   309  
   310  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   311  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   312  	dl.lock.Lock()
   313  	defer dl.lock.Unlock()
   314  
   315  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   316  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   317  		return 0, errors.New("unknown parent")
   318  	}
   319  	for i := 1; i < len(headers); i++ {
   320  		if headers[i].ParentHash != headers[i-1].Hash() {
   321  			return i, errors.New("unknown parent")
   322  		}
   323  	}
   324  	// Do a full insert if pre-checks passed
   325  	for i, header := range headers {
   326  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   327  			continue
   328  		}
   329  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   330  			return i, errors.New("unknown parent")
   331  		}
   332  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   333  		dl.ownHeaders[header.Hash()] = header
   334  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   335  	}
   336  	return len(headers), nil
   337  }
   338  
   339  // InsertChain injects a new batch of blocks into the simulated chain.
   340  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   341  	dl.lock.Lock()
   342  	defer dl.lock.Unlock()
   343  
   344  	for i, block := range blocks {
   345  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   346  			return i, errors.New("unknown parent")
   347  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   348  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   349  		}
   350  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   351  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   352  			dl.ownHeaders[block.Hash()] = block.Header()
   353  		}
   354  		dl.ownBlocks[block.Hash()] = block
   355  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   356  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   357  	}
   358  	return len(blocks), nil
   359  }
   360  
   361  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   362  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   363  	dl.lock.Lock()
   364  	defer dl.lock.Unlock()
   365  
   366  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   367  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   368  			return i, errors.New("unknown owner")
   369  		}
   370  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   371  			return i, errors.New("unknown parent")
   372  		}
   373  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   374  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   375  	}
   376  	return len(blocks), nil
   377  }
   378  
   379  // Rollback removes some recently added elements from the chain.
   380  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   381  	dl.lock.Lock()
   382  	defer dl.lock.Unlock()
   383  
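         	// Walk the given hashes in reverse: ownHashes is only truncated while the
         	// hash being removed is still the chain tip, but the header, block, receipt
         	// and total difficulty entries are dropped unconditionally.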
   384  	for i := len(hashes) - 1; i >= 0; i-- {
   385  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   386  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   387  		}
   388  		delete(dl.ownChainTd, hashes[i])
   389  		delete(dl.ownHeaders, hashes[i])
   390  		delete(dl.ownReceipts, hashes[i])
   391  		delete(dl.ownBlocks, hashes[i])
   392  	}
   393  }
   394  
   395  // newPeer registers a new block download source into the downloader.
   396  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   397  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   398  }
   399  
   400  // newSlowPeer registers a new block download source into the downloader, with a
   401  // specific delay time on processing the network packets sent to it, simulating
   402  // potentially slow network IO.
   403  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   404  	dl.lock.Lock()
   405  	defer dl.lock.Unlock()
   406  
   407  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   408  	if err == nil {
   409  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   410  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   411  		copy(dl.peerHashes[id], hashes)
   412  
   413  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   414  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   415  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   416  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   417  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   418  
   419  		genesis := hashes[len(hashes)-1]
   420  		if header := headers[genesis]; header != nil {
   421  			dl.peerHeaders[id][genesis] = header
   422  			dl.peerChainTds[id][genesis] = header.Difficulty
   423  		}
   424  		if block := blocks[genesis]; block != nil {
   425  			dl.peerBlocks[id][genesis] = block
   426  			dl.peerChainTds[id][genesis] = block.Difficulty()
   427  		}
   428  
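         		// Walk from the block after the genesis towards the head (the hash slice
         		// is ordered head first), accumulating total difficulties only while the
         		// parent is already known to this peer.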
   429  		for i := len(hashes) - 2; i >= 0; i-- {
   430  			hash := hashes[i]
   431  
   432  			if header, ok := headers[hash]; ok {
   433  				dl.peerHeaders[id][hash] = header
   434  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   435  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   436  				}
   437  			}
   438  			if block, ok := blocks[hash]; ok {
   439  				dl.peerBlocks[id][hash] = block
   440  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   441  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   442  				}
   443  			}
   444  			if receipt, ok := receipts[hash]; ok {
   445  				dl.peerReceipts[id][hash] = receipt
   446  			}
   447  		}
   448  	}
   449  	return err
   450  }
   451  
   452  // dropPeer simulates a hard peer removal from the connection pool.
   453  func (dl *downloadTester) dropPeer(id string) {
   454  	dl.lock.Lock()
   455  	defer dl.lock.Unlock()
   456  
   457  	delete(dl.peerHashes, id)
   458  	delete(dl.peerHeaders, id)
   459  	delete(dl.peerBlocks, id)
   460  	delete(dl.peerChainTds, id)
   461  
   462  	dl.downloader.UnregisterPeer(id)
   463  }
   464  
   465  type downloadTesterPeer struct {
   466  	dl    *downloadTester
   467  	id    string
   468  	delay time.Duration
   469  	lock  sync.RWMutex
   470  }
   471  
   472  // setDelay is a thread safe setter for the network delay value.
   473  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   474  	dlp.lock.Lock()
   475  	defer dlp.lock.Unlock()
   476  
   477  	dlp.delay = delay
   478  }
   479  
   480  // waitDelay is a thread safe way to sleep for the configured time.
   481  func (dlp *downloadTesterPeer) waitDelay() {
   482  	dlp.lock.RLock()
   483  	delay := dlp.delay
   484  	dlp.lock.RUnlock()
   485  
   486  	time.Sleep(delay)
   487  }
   488  
    489  // Head retrieves the peer's current head hash and total difficulty. This
    490  // mock does not report a TD and simply returns nil for it.
   491  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   492  	dlp.dl.lock.RLock()
   493  	defer dlp.dl.lock.RUnlock()
   494  
   495  	return dlp.dl.peerHashes[dlp.id][0], nil
   496  }
   497  
    498  // RequestHeadersByHash mimics a GetBlockHeaders request keyed on a hashed
    499  // origin. The hash is mapped to its canonical block number within this test
    500  // peer's chain, and the request is then delegated to the numbered header fetcher.
   501  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   502  	// Find the canonical number of the hash
   503  	dlp.dl.lock.RLock()
   504  	number := uint64(0)
   505  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   506  		if hash == origin {
   507  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   508  			break
   509  		}
   510  	}
   511  	dlp.dl.lock.RUnlock()
   512  
   513  	// Use the absolute header fetcher to satisfy the query
   514  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   515  }
   516  
    517  // RequestHeadersByNumber mimics a GetBlockHeaders request keyed on a numbered
    518  // origin, gathering a batch of headers from this test peer's chain and
    519  // delivering them asynchronously to the downloader.
   520  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   521  	dlp.waitDelay()
   522  
   523  	dlp.dl.lock.RLock()
   524  	defer dlp.dl.lock.RUnlock()
   525  
   526  	// Gather the next batch of headers
   527  	hashes := dlp.dl.peerHashes[dlp.id]
   528  	headers := dlp.dl.peerHeaders[dlp.id]
   529  	result := make([]*types.Header, 0, amount)
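         	// The hash slice is ordered head first, so block number origin sits at index
         	// len(hashes)-origin-1, and every further header steps skip+1 numbers towards
         	// the head (this mock ignores the reverse flag).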
   530  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   531  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   532  			result = append(result, header)
   533  		}
   534  	}
   535  	// Delay delivery a bit to allow attacks to unfold
   536  	go func() {
   537  		time.Sleep(time.Millisecond)
   538  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   539  	}()
   540  	return nil
   541  }
   542  
    543  // RequestBodies mimics a GetBlockBodies request, collecting the transactions
    544  // and uncles of the requested blocks from this test peer's chain and
    545  // delivering them asynchronously to the downloader.
   546  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   547  	dlp.waitDelay()
   548  
   549  	dlp.dl.lock.RLock()
   550  	defer dlp.dl.lock.RUnlock()
   551  
   552  	blocks := dlp.dl.peerBlocks[dlp.id]
   553  
   554  	transactions := make([][]*types.Transaction, 0, len(hashes))
   555  	uncles := make([][]*types.Header, 0, len(hashes))
   556  
   557  	for _, hash := range hashes {
   558  		if block, ok := blocks[hash]; ok {
   559  			transactions = append(transactions, block.Transactions())
   560  			uncles = append(uncles, block.Uncles())
   561  		}
   562  	}
   563  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   564  
   565  	return nil
   566  }
   567  
    568  // RequestReceipts mimics a GetReceipts request, collecting the receipts of
    569  // the requested blocks from this test peer's chain and delivering them
    570  // asynchronously to the downloader.
   571  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   572  	dlp.waitDelay()
   573  
   574  	dlp.dl.lock.RLock()
   575  	defer dlp.dl.lock.RUnlock()
   576  
   577  	receipts := dlp.dl.peerReceipts[dlp.id]
   578  
   579  	results := make([][]*types.Receipt, 0, len(hashes))
   580  	for _, hash := range hashes {
   581  		if receipt, ok := receipts[hash]; ok {
   582  			results = append(results, receipt)
   583  		}
   584  	}
   585  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   586  
   587  	return nil
   588  }
   589  
    590  // RequestNodeData mimics a GetNodeData request, serving state entries from
    591  // the shared peer database (minus any entries marked missing for this peer)
    592  // and delivering them asynchronously to the downloader.
   593  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   594  	dlp.waitDelay()
   595  
   596  	dlp.dl.lock.RLock()
   597  	defer dlp.dl.lock.RUnlock()
   598  
   599  	results := make([][]byte, 0, len(hashes))
   600  	for _, hash := range hashes {
   601  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   602  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   603  				results = append(results, data)
   604  			}
   605  		}
   606  	}
   607  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   608  
   609  	return nil
   610  }
   611  
   612  // assertOwnChain checks if the local chain contains the correct number of items
   613  // of the various chain components.
   614  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   615  	assertOwnForkedChain(t, tester, 1, []int{length})
   616  }
   617  
   618  // assertOwnForkedChain checks if the local forked chain contains the correct
   619  // number of items of the various chain components.
   620  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   621  	// Initialize the counters for the first fork
   622  	headers, blocks := lengths[0], lengths[0]
   623  
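         	// Receipts are only fetched for blocks at or below the fast sync pivot, and
         	// the pivot itself floats within fsPivotInterval of the full-block tail, so
         	// only a [min, max] window of receipt counts can be asserted.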
   624  	minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks
   625  	if minReceipts < 0 {
   626  		minReceipts = 1
   627  	}
   628  	if maxReceipts < 0 {
   629  		maxReceipts = 1
   630  	}
   631  	// Update the counters for each subsequent fork
   632  	for _, length := range lengths[1:] {
   633  		headers += length - common
   634  		blocks += length - common
   635  
   636  		minReceipts += length - common - fsMinFullBlocks - fsPivotInterval
   637  		maxReceipts += length - common - fsMinFullBlocks
   638  	}
   639  	switch tester.downloader.mode {
   640  	case FullSync:
   641  		minReceipts, maxReceipts = 1, 1
   642  	case LightSync:
   643  		blocks, minReceipts, maxReceipts = 1, 1, 1
   644  	}
   645  	if hs := len(tester.ownHeaders); hs != headers {
   646  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   647  	}
   648  	if bs := len(tester.ownBlocks); bs != blocks {
   649  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   650  	}
   651  	if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts {
   652  		t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts)
   653  	}
   654  	// Verify the state trie too for fast syncs
   655  	if tester.downloader.mode == FastSync {
   656  		var index int
   657  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   658  			index = pivot
   659  		} else {
   660  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   661  		}
   662  		if index > 0 {
   663  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(tester.stateDb)); statedb == nil || err != nil {
   664  				t.Fatalf("state reconstruction failed: %v", err)
   665  			}
   666  		}
   667  	}
   668  }
   669  
   670  // Tests that simple synchronization against a canonical chain works correctly.
   671  // In this test common ancestor lookup should be short circuited and not require
   672  // binary searching.
   673  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   674  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   675  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   676  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   677  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   678  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   679  
   680  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   681  	t.Parallel()
   682  
   683  	tester := newTester()
   684  	defer tester.terminate()
   685  
   686  	// Create a small enough block chain to download
   687  	targetBlocks := blockCacheLimit - 15
   688  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   689  
   690  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   691  
   692  	// Synchronise with the peer and make sure all relevant data was retrieved
   693  	if err := tester.sync("peer", nil, mode); err != nil {
   694  		t.Fatalf("failed to synchronise blocks: %v", err)
   695  	}
   696  	assertOwnChain(t, tester, targetBlocks+1)
   697  }
   698  
    699  // Tests that if a large batch of blocks is being downloaded, the download is
    700  // throttled until the cached blocks are retrieved.
   701  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   702  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   703  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   704  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   705  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   706  
   707  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   708  	tester := newTester()
   709  	defer tester.terminate()
   710  
   711  	// Create a long block chain to download and the tester
   712  	targetBlocks := 8 * blockCacheLimit
   713  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   714  
   715  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   716  
   717  	// Wrap the importer to allow stepping
   718  	blocked, proceed := uint32(0), make(chan struct{})
   719  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   720  		atomic.StoreUint32(&blocked, uint32(len(results)))
   721  		<-proceed
   722  	}
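         	// The hook above parks each batch of imported results until the test sends
         	// on proceed, so the cache can be observed filling up while imports stall.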
   723  	// Start a synchronisation concurrently
   724  	errc := make(chan error)
   725  	go func() {
   726  		errc <- tester.sync("peer", nil, mode)
   727  	}()
   728  	// Iteratively take some blocks, always checking the retrieval count
   729  	for {
   730  		// Check the retrieval count synchronously (! reason for this ugly block)
   731  		tester.lock.RLock()
   732  		retrieved := len(tester.ownBlocks)
   733  		tester.lock.RUnlock()
   734  		if retrieved >= targetBlocks+1 {
   735  			break
   736  		}
   737  		// Wait a bit for sync to throttle itself
   738  		var cached, frozen int
   739  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   740  			time.Sleep(25 * time.Millisecond)
   741  
   742  			tester.lock.Lock()
   743  			tester.downloader.queue.lock.Lock()
   744  			cached = len(tester.downloader.queue.blockDonePool)
   745  			if mode == FastSync {
   746  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   747  					if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   748  						cached = receipts
   749  					}
   750  				}
   751  			}
   752  			frozen = int(atomic.LoadUint32(&blocked))
   753  			retrieved = len(tester.ownBlocks)
   754  			tester.downloader.queue.lock.Unlock()
   755  			tester.lock.Unlock()
   756  
   757  			if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 {
   758  				break
   759  			}
   760  		}
   761  		// Make sure we filled up the cache, then exhaust it
   762  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   763  
   764  		tester.lock.RLock()
   765  		retrieved = len(tester.ownBlocks)
   766  		tester.lock.RUnlock()
   767  		if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 {
   768  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1)
   769  		}
   770  		// Permit the blocked blocks to import
   771  		if atomic.LoadUint32(&blocked) > 0 {
   772  			atomic.StoreUint32(&blocked, uint32(0))
   773  			proceed <- struct{}{}
   774  		}
   775  	}
   776  	// Check that we haven't pulled more blocks than available
   777  	assertOwnChain(t, tester, targetBlocks+1)
   778  	if err := <-errc; err != nil {
   779  		t.Fatalf("block synchronization failed: %v", err)
   780  	}
   781  }
   782  
   783  // Tests that simple synchronization against a forked chain works correctly. In
   784  // this test common ancestor lookup should *not* be short circuited, and a full
   785  // binary search should be executed.
   786  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   787  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   788  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   789  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   790  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   791  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   792  
   793  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   794  	t.Parallel()
   795  
   796  	tester := newTester()
   797  	defer tester.terminate()
   798  
   799  	// Create a long enough forked chain
   800  	common, fork := MaxHashFetch, 2*MaxHashFetch
   801  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   802  
   803  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   804  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   805  
   806  	// Synchronise with the peer and make sure all blocks were retrieved
   807  	if err := tester.sync("fork A", nil, mode); err != nil {
   808  		t.Fatalf("failed to synchronise blocks: %v", err)
   809  	}
   810  	assertOwnChain(t, tester, common+fork+1)
   811  
   812  	// Synchronise with the second peer and make sure that fork is pulled too
   813  	if err := tester.sync("fork B", nil, mode); err != nil {
   814  		t.Fatalf("failed to synchronise blocks: %v", err)
   815  	}
   816  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   817  }
   818  
    819  // Tests that synchronising against a much shorter but much heavier fork works
    820  // correctly and is not dropped.
   821  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   822  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   823  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   824  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   825  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   826  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   827  
   828  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   829  	t.Parallel()
   830  
   831  	tester := newTester()
   832  	defer tester.terminate()
   833  
   834  	// Create a long enough forked chain
   835  	common, fork := MaxHashFetch, 4*MaxHashFetch
   836  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   837  
   838  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   839  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   840  
   841  	// Synchronise with the peer and make sure all blocks were retrieved
   842  	if err := tester.sync("light", nil, mode); err != nil {
   843  		t.Fatalf("failed to synchronise blocks: %v", err)
   844  	}
   845  	assertOwnChain(t, tester, common+fork+1)
   846  
   847  	// Synchronise with the second peer and make sure that fork is pulled too
   848  	if err := tester.sync("heavy", nil, mode); err != nil {
   849  		t.Fatalf("failed to synchronise blocks: %v", err)
   850  	}
   851  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   852  }
   853  
   854  // Tests that chain forks are contained within a certain interval of the current
   855  // chain head, ensuring that malicious peers cannot waste resources by feeding
   856  // long dead chains.
   857  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   858  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   859  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   860  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   861  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   862  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   863  
   864  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   865  	t.Parallel()
   866  
   867  	tester := newTester()
   868  	defer tester.terminate()
   869  
   870  	// Create a long enough forked chain
   871  	common, fork := 13, int(MaxForkAncestry+17)
   872  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   873  
   874  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   875  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   876  
   877  	// Synchronise with the peer and make sure all blocks were retrieved
   878  	if err := tester.sync("original", nil, mode); err != nil {
   879  		t.Fatalf("failed to synchronise blocks: %v", err)
   880  	}
   881  	assertOwnChain(t, tester, common+fork+1)
   882  
    883  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   884  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   885  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   886  	}
   887  }
   888  
   889  // Tests that chain forks are contained within a certain interval of the current
   890  // chain head for short but heavy forks too. These are a bit special because they
   891  // take different ancestor lookup paths.
   892  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   893  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   894  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   895  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   896  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   897  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   898  
   899  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   900  	t.Parallel()
   901  
   902  	tester := newTester()
   903  	defer tester.terminate()
   904  
   905  	// Create a long enough forked chain
   906  	common, fork := 13, int(MaxForkAncestry+17)
   907  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   908  
   909  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   910  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   911  
   912  	// Synchronise with the peer and make sure all blocks were retrieved
   913  	if err := tester.sync("original", nil, mode); err != nil {
   914  		t.Fatalf("failed to synchronise blocks: %v", err)
   915  	}
   916  	assertOwnChain(t, tester, common+fork+1)
   917  
    918  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   919  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   920  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   921  	}
   922  }
   923  
   924  // Tests that an inactive downloader will not accept incoming block headers and
   925  // bodies.
   926  func TestInactiveDownloader62(t *testing.T) {
   927  	t.Parallel()
   928  
   929  	tester := newTester()
   930  	defer tester.terminate()
   931  
   932  	// Check that neither block headers nor bodies are accepted
   933  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   934  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   935  	}
   936  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   937  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   938  	}
   939  }
   940  
   941  // Tests that an inactive downloader will not accept incoming block headers,
   942  // bodies and receipts.
   943  func TestInactiveDownloader63(t *testing.T) {
   944  	t.Parallel()
   945  
   946  	tester := newTester()
   947  	defer tester.terminate()
   948  
    949  	// Check that neither block headers, bodies nor receipts are accepted
   950  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   951  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   952  	}
   953  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   954  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   955  	}
   956  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   957  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   958  	}
   959  }
   960  
   961  // Tests that a canceled download wipes all previously accumulated state.
   962  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   963  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   964  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   965  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   966  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   967  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   968  
   969  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   970  	t.Parallel()
   971  
   972  	tester := newTester()
   973  	defer tester.terminate()
   974  
   975  	// Create a small enough block chain to download and the tester
   976  	targetBlocks := blockCacheLimit - 15
   977  	if targetBlocks >= MaxHashFetch {
   978  		targetBlocks = MaxHashFetch - 15
   979  	}
   980  	if targetBlocks >= MaxHeaderFetch {
   981  		targetBlocks = MaxHeaderFetch - 15
   982  	}
   983  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   984  
   985  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   986  
   987  	// Make sure canceling works with a pristine downloader
   988  	tester.downloader.Cancel()
   989  	if !tester.downloader.queue.Idle() {
   990  		t.Errorf("download queue not idle")
   991  	}
   992  	// Synchronise with the peer, but cancel afterwards
   993  	if err := tester.sync("peer", nil, mode); err != nil {
   994  		t.Fatalf("failed to synchronise blocks: %v", err)
   995  	}
   996  	tester.downloader.Cancel()
   997  	if !tester.downloader.queue.Idle() {
   998  		t.Errorf("download queue not idle")
   999  	}
  1000  }
  1001  
  1002  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
  1003  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
  1004  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
  1005  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
  1006  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
  1007  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
  1008  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
  1009  
  1010  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
  1011  	t.Parallel()
  1012  
  1013  	tester := newTester()
  1014  	defer tester.terminate()
  1015  
  1016  	// Create various peers with various parts of the chain
  1017  	targetPeers := 8
  1018  	targetBlocks := targetPeers*blockCacheLimit - 15
  1019  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1020  
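         	// Each peer serves a progressively older suffix of the same chain (hashes are
         	// ordered head first, so a larger starting offset means an older chain head).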
  1021  	for i := 0; i < targetPeers; i++ {
  1022  		id := fmt.Sprintf("peer #%d", i)
  1023  		tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
  1024  	}
  1025  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1026  		t.Fatalf("failed to synchronise blocks: %v", err)
  1027  	}
  1028  	assertOwnChain(t, tester, targetBlocks+1)
  1029  }
  1030  
   1031  // Tests that synchronisations behave well in multi-version protocol environments
   1032  // and do not wreak havoc on other nodes in the network.
  1033  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1034  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1035  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1036  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1037  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1038  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1039  
  1040  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1041  	t.Parallel()
  1042  
  1043  	tester := newTester()
  1044  	defer tester.terminate()
  1045  
  1046  	// Create a small enough block chain to download
  1047  	targetBlocks := blockCacheLimit - 15
  1048  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1049  
  1050  	// Create peers of every type
  1051  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1052  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1053  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1054  
  1055  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1056  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1057  		t.Fatalf("failed to synchronise blocks: %v", err)
  1058  	}
  1059  	assertOwnChain(t, tester, targetBlocks+1)
  1060  
  1061  	// Check that no peers have been dropped off
  1062  	for _, version := range []int{62, 63, 64} {
  1063  		peer := fmt.Sprintf("peer %d", version)
  1064  		if _, ok := tester.peerHashes[peer]; !ok {
  1065  			t.Errorf("%s dropped", peer)
  1066  		}
  1067  	}
  1068  }
  1069  
  1070  // Tests that if a block is empty (e.g. header only), no body request should be
  1071  // made, and instead the header should be assembled into a whole block in itself.
  1072  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1073  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1074  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1075  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1076  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1077  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1078  
  1079  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1080  	t.Parallel()
  1081  
  1082  	tester := newTester()
  1083  	defer tester.terminate()
  1084  
  1085  	// Create a block chain to download
  1086  	targetBlocks := 2*blockCacheLimit - 15
  1087  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1088  
  1089  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1090  
  1091  	// Instrument the downloader to signal body requests
  1092  	bodiesHave, receiptsHave := int32(0), int32(0)
  1093  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1094  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1095  	}
  1096  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1097  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1098  	}
  1099  	// Synchronise with the peer and make sure all blocks were retrieved
  1100  	if err := tester.sync("peer", nil, mode); err != nil {
  1101  		t.Fatalf("failed to synchronise blocks: %v", err)
  1102  	}
  1103  	assertOwnChain(t, tester, targetBlocks+1)
  1104  
  1105  	// Validate the number of block bodies that should have been requested
  1106  	bodiesNeeded, receiptsNeeded := 0, 0
  1107  	for _, block := range blocks {
  1108  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1109  			bodiesNeeded++
  1110  		}
  1111  	}
  1112  	for hash, receipt := range receipts {
  1113  		if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot {
  1114  			receiptsNeeded++
  1115  		}
  1116  	}
  1117  	if int(bodiesHave) != bodiesNeeded {
  1118  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1119  	}
  1120  	if int(receiptsHave) != receiptsNeeded {
  1121  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1122  	}
  1123  }
  1124  
  1125  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1126  // stalling the downloader by feeding gapped header chains.
  1127  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1128  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1129  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1130  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1131  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1132  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1133  
  1134  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1135  	t.Parallel()
  1136  
  1137  	tester := newTester()
  1138  	defer tester.terminate()
  1139  
  1140  	// Create a small enough block chain to download
  1141  	targetBlocks := blockCacheLimit - 15
  1142  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1143  
  1144  	// Attempt a full sync with an attacker feeding gapped headers
  1145  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1146  	missing := targetBlocks / 2
  1147  	delete(tester.peerHeaders["attack"], hashes[missing])
  1148  
  1149  	if err := tester.sync("attack", nil, mode); err == nil {
  1150  		t.Fatalf("succeeded attacker synchronisation")
  1151  	}
  1152  	// Synchronise with the valid peer and make sure sync succeeds
  1153  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1154  	if err := tester.sync("valid", nil, mode); err != nil {
  1155  		t.Fatalf("failed to synchronise blocks: %v", err)
  1156  	}
  1157  	assertOwnChain(t, tester, targetBlocks+1)
  1158  }
  1159  
  1160  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1161  // detects the invalid numbering.
  1162  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1163  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1164  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1165  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1166  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1167  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1168  
  1169  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1170  	tester := newTester()
  1171  	defer tester.terminate()
  1172  
  1173  	// Create a small enough block chain to download
  1174  	targetBlocks := blockCacheLimit - 15
  1175  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1176  
  1177  	// Attempt a full sync with an attacker feeding shifted headers
  1178  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1179  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1180  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1181  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1182  
  1183  	if err := tester.sync("attack", nil, mode); err == nil {
  1184  		t.Fatalf("succeeded attacker synchronisation")
  1185  	}
  1186  	// Synchronise with the valid peer and make sure sync succeeds
  1187  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1188  	if err := tester.sync("valid", nil, mode); err != nil {
  1189  		t.Fatalf("failed to synchronise blocks: %v", err)
  1190  	}
  1191  	assertOwnChain(t, tester, targetBlocks+1)
  1192  }
  1193  
  1194  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1195  // for various failure scenarios. Afterwards a full sync is attempted to make
  1196  // sure no state was corrupted.
  1197  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1198  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1199  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1200  
  1201  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1202  	tester := newTester()
  1203  	defer tester.terminate()
  1204  
  1205  	// Create a small enough block chain to download
  1206  	targetBlocks := 3*fsHeaderSafetyNet + fsPivotInterval + fsMinFullBlocks
  1207  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1208  
  1209  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1210  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1211  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1212  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1213  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1214  
  1215  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1216  		t.Fatalf("succeeded fast attacker synchronisation")
  1217  	}
  1218  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1219  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1220  	}
  1221  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1222  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1223  	// rolled back, and also the pivot point being reverted to a non-block status.
  1224  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1225  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1226  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1227  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1228  
  1229  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1230  		t.Fatalf("succeeded block attacker synchronisation")
  1231  	}
  1232  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1233  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1234  	}
  1235  	if mode == FastSync {
  1236  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1237  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1238  		}
  1239  	}
  1240  	// Attempt to sync with an attacker that withholds promised blocks after the
  1241  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1242  	// but already imported pivot block.
  1243  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1244  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1245  
  1246  	tester.downloader.fsPivotFails = 0
  1247  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1248  		for i := missing; i <= len(hashes); i++ {
  1249  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1250  		}
  1251  		tester.downloader.syncInitHook = nil
  1252  	}
  1253  
  1254  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1255  		t.Fatalf("succeeded withholding attacker synchronisation")
  1256  	}
  1257  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1258  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1259  	}
  1260  	if mode == FastSync {
  1261  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1262  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1263  		}
  1264  	}
  1265  	tester.downloader.fsPivotFails = fsCriticalTrials
  1266  
  1267  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1268  	// rollback should also disable fast syncing for this process, verify that we
  1269  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1270  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1271  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1272  	if err := tester.sync("valid", nil, mode); err != nil {
  1273  		t.Fatalf("failed to synchronise blocks: %v", err)
  1274  	}
  1275  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1276  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1277  	}
  1278  	if mode != LightSync {
  1279  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1280  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1281  		}
  1282  	}
  1283  }
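
// rollbackSketch is a hypothetical illustration of the behaviour asserted by
// testInvalidHeaderRollback above: once an invalid header is detected, the most
// recently accepted headers (at most limit of them) are discarded so that a
// later sync can fetch them again from a different peer. It operates on a plain
// hash slice and is not the downloader's actual rollback implementation.
func rollbackSketch(accepted []common.Hash, limit int) []common.Hash {
	if limit >= len(accepted) {
		return accepted[:0]
	}
	return accepted[:len(accepted)-limit]
}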
  1284  
  1285  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1286  // afterwards by not sending any useful hashes.
  1287  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1288  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1289  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1290  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1291  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1292  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1293  
  1294  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1295  	t.Parallel()
  1296  
  1297  	tester := newTester()
  1298  	defer tester.terminate()
  1299  
  1300  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1301  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1302  
  1303  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1304  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1305  	}
  1306  }
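
// worthSyncing is a hypothetical sketch of the total-difficulty comparison that
// the starvation test above exploits: advertising a TD higher than the local one
// is enough to be selected for syncing, so a peer that then delivers nothing
// useful must be detected and dropped (errStallingPeer). This is an illustration
// only, not the downloader's actual peer selection code.
func worthSyncing(localTd, peerTd *big.Int) bool {
	return localTd != nil && peerTd != nil && peerTd.Cmp(localTd) > 0
}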
  1307  
  1308  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1309  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1310  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1311  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1312  
  1313  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1314  	// Define the disconnection requirement for individual hash fetch errors
  1315  	tests := []struct {
  1316  		result error
  1317  		drop   bool
  1318  	}{
  1319  		{nil, false},                        // Sync succeeded, all is well
  1320  		{errBusy, false},                    // Sync is already in progress, no problem
  1321  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1322  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1323  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1324  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1325  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1326  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1327  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1328  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1329  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1330  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1331  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1332  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1333  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1334  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1335  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1336  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1337  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1338  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1339  	}
  1340  	// Run the tests and check disconnection status
  1341  	tester := newTester()
  1342  	defer tester.terminate()
  1343  
  1344  	for i, tt := range tests {
  1345  		// Register a new peer and ensure its presence
  1346  		id := fmt.Sprintf("test %d", i)
  1347  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1348  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1349  		}
  1350  		if _, ok := tester.peerHashes[id]; !ok {
  1351  			t.Fatalf("test %d: registered peer not found", i)
  1352  		}
  1353  		// Simulate a synchronisation and check the required result
  1354  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1355  
  1356  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1357  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1358  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1359  		}
  1360  	}
  1361  }
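
// shouldDropPeer condenses the expectations encoded in the test table above into
// a single hypothetical predicate: protocol violations attributable to the
// serving peer warrant a drop, whereas local conditions and cancellations do
// not. It mirrors the table for illustration and is not the downloader's actual
// error handling.
func shouldDropPeer(err error) bool {
	switch err {
	case errBadPeer, errStallingPeer, errTimeout, errEmptyHeaderSet,
		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
		return true
	default:
		return false
	}
}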
  1362  
  1363  // Tests that synchronisation progress (origin block number, current block number
  1364  // and highest block number) is tracked and updated correctly.
  1365  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1366  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1367  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1368  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1369  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1370  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1371  
  1372  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1373  	t.Parallel()
  1374  
  1375  	tester := newTester()
  1376  	defer tester.terminate()
  1377  
  1378  	// Create a small enough block chain to download
  1379  	targetBlocks := blockCacheLimit - 15
  1380  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1381  
  1382  	// Set a sync init hook to catch progress changes
  1383  	starting := make(chan struct{})
  1384  	progress := make(chan struct{})
  1385  
  1386  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1387  		starting <- struct{}{}
  1388  		<-progress
  1389  	}
  1390  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1391  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1392  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1393  	}
  1394  	// Synchronise half the blocks and check initial progress
  1395  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1396  	pending := new(sync.WaitGroup)
  1397  	pending.Add(1)
  1398  
  1399  	go func() {
  1400  		defer pending.Done()
  1401  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1402  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1403  		}
  1404  	}()
  1405  	<-starting
  1406  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1407  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1408  	}
  1409  	progress <- struct{}{}
  1410  	pending.Wait()
  1411  
  1412  	// Synchronise all the blocks and check continuation progress
  1413  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1414  	pending.Add(1)
  1415  
  1416  	go func() {
  1417  		defer pending.Done()
  1418  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1419  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1420  		}
  1421  	}()
  1422  	<-starting
  1423  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1424  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1425  	}
  1426  	progress <- struct{}{}
  1427  	pending.Wait()
  1428  
  1429  	// Check final progress after successful sync
  1430  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1431  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1432  	}
  1433  }
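
// syncPercent is a hypothetical helper showing how the three figures asserted by
// the progress tests combine into a completion percentage: the distance covered
// from the starting block relative to the advertised highest block. It is an
// illustration only and not part of the downloader's API.
func syncPercent(starting, current, highest uint64) float64 {
	if highest <= starting {
		return 100
	}
	return 100 * float64(current-starting) / float64(highest-starting)
}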
  1434  
  1435  // Tests that synchronisation progress (origin block number and highest block
  1436  // number) is tracked and updated correctly in case of a fork (or manual head
  1437  // reversal).
  1438  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1439  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1440  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1441  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1442  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1443  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1444  
  1445  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1446  	t.Parallel()
  1447  
  1448  	tester := newTester()
  1449  	defer tester.terminate()
  1450  
  1451  	// Create a forked chain to simulate origin reversal
  1452  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1453  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1454  
  1455  	// Set a sync init hook to catch progress changes
  1456  	starting := make(chan struct{})
  1457  	progress := make(chan struct{})
  1458  
  1459  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1460  		starting <- struct{}{}
  1461  		<-progress
  1462  	}
  1463  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1464  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1465  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1466  	}
  1467  	// Synchronise with one of the forks and check progress
  1468  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1469  	pending := new(sync.WaitGroup)
  1470  	pending.Add(1)
  1471  
  1472  	go func() {
  1473  		defer pending.Done()
  1474  		if err := tester.sync("fork A", nil, mode); err != nil {
  1475  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1476  		}
  1477  	}()
  1478  	<-starting
  1479  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1480  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1481  	}
  1482  	progress <- struct{}{}
  1483  	pending.Wait()
  1484  
  1485  	// Simulate a successful sync above the fork
  1486  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1487  
  1488  	// Synchronise with the second fork and check progress resets
  1489  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1490  	pending.Add(1)
  1491  
  1492  	go func() {
  1493  		defer pending.Done()
  1494  		if err := tester.sync("fork B", nil, mode); err != nil {
  1495  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1496  		}
  1497  	}()
  1498  	<-starting
  1499  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1500  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1501  	}
  1502  	progress <- struct{}{}
  1503  	pending.Wait()
  1504  
  1505  	// Check final progress after successful sync
  1506  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1507  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1508  	}
  1509  }
  1510  
  1511  // Tests that if synchronisation is aborted due to some failure, then the progress
  1512  // origin is not updated in the next sync cycle, as it should be considered the
  1513  // continuation of the previous sync and not a new instance.
  1514  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1515  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1516  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1517  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1518  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1519  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1520  
  1521  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1522  	t.Parallel()
  1523  
  1524  	tester := newTester()
  1525  	defer tester.terminate()
  1526  
  1527  	// Create a small enough block chain to download
  1528  	targetBlocks := blockCacheLimit - 15
  1529  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1530  
  1531  	// Set a sync init hook to catch progress changes
  1532  	starting := make(chan struct{})
  1533  	progress := make(chan struct{})
  1534  
  1535  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1536  		starting <- struct{}{}
  1537  		<-progress
  1538  	}
  1539  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1540  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1541  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1542  	}
  1543  	// Attempt a full sync with a faulty peer
  1544  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1545  	missing := targetBlocks / 2
  1546  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1547  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1548  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1549  
  1550  	pending := new(sync.WaitGroup)
  1551  	pending.Add(1)
  1552  
  1553  	go func() {
  1554  		defer pending.Done()
  1555  		if err := tester.sync("faulty", nil, mode); err == nil {
  1556  			panic("succeeded faulty synchronisation")
  1557  		}
  1558  	}()
  1559  	<-starting
  1560  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1561  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1562  	}
  1563  	progress <- struct{}{}
  1564  	pending.Wait()
  1565  
  1566  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1567  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1568  	pending.Add(1)
  1569  
  1570  	go func() {
  1571  		defer pending.Done()
  1572  		if err := tester.sync("valid", nil, mode); err != nil {
  1573  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1574  		}
  1575  	}()
  1576  	<-starting
  1577  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1578  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1579  	}
  1580  	progress <- struct{}{}
  1581  	pending.Wait()
  1582  
  1583  	// Check final progress after successful sync
  1584  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1585  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1586  	}
  1587  }
  1588  
  1589  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1590  // the progress height is successfully reduced at the next sync invocation.
  1591  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1592  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1593  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1594  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1595  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1596  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1597  
  1598  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1599  	t.Parallel()
  1600  
  1601  	tester := newTester()
  1602  	defer tester.terminate()
  1603  
  1604  	// Create a small block chain
  1605  	targetBlocks := blockCacheLimit - 15
  1606  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1607  
  1608  	// Set a sync init hook to catch progress changes
  1609  	starting := make(chan struct{})
  1610  	progress := make(chan struct{})
  1611  
  1612  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1613  		starting <- struct{}{}
  1614  		<-progress
  1615  	}
  1616  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1617  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1618  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1619  	}
  1620  	// Create and sync with an attacker that promises a higher chain than available
  1621  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1622  	for i := 1; i < 3; i++ {
  1623  		delete(tester.peerHeaders["attack"], hashes[i])
  1624  		delete(tester.peerBlocks["attack"], hashes[i])
  1625  		delete(tester.peerReceipts["attack"], hashes[i])
  1626  	}
  1627  
  1628  	pending := new(sync.WaitGroup)
  1629  	pending.Add(1)
  1630  
  1631  	go func() {
  1632  		defer pending.Done()
  1633  		if err := tester.sync("attack", nil, mode); err == nil {
  1634  			panic("succeeded attacker synchronisation")
  1635  		}
  1636  	}()
  1637  	<-starting
  1638  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1639  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1640  	}
  1641  	progress <- struct{}{}
  1642  	pending.Wait()
  1643  
  1644  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1645  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1646  	pending.Add(1)
  1647  
  1648  	go func() {
  1649  		defer pending.Done()
  1650  		if err := tester.sync("valid", nil, mode); err != nil {
  1651  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1652  		}
  1653  	}()
  1654  	<-starting
  1655  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1656  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1657  	}
  1658  	progress <- struct{}{}
  1659  	pending.Wait()
  1660  
  1661  	// Check final progress after successful sync
  1662  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1663  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1664  	}
  1665  }
  1666  
  1667  // This test reproduces an issue where unexpected deliveries would
  1668  // block indefinitely if they arrived at the right time.
  1669  func TestDeliverHeadersHang62(t *testing.T)      { testDeliverHeadersHang(t, 62, FullSync) }
  1670  func TestDeliverHeadersHang63Full(t *testing.T)  { testDeliverHeadersHang(t, 63, FullSync) }
  1671  func TestDeliverHeadersHang63Fast(t *testing.T)  { testDeliverHeadersHang(t, 63, FastSync) }
  1672  func TestDeliverHeadersHang64Full(t *testing.T)  { testDeliverHeadersHang(t, 64, FullSync) }
  1673  func TestDeliverHeadersHang64Fast(t *testing.T)  { testDeliverHeadersHang(t, 64, FastSync) }
  1674  func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
  1675  
  1676  type floodingTestPeer struct {
  1677  	peer   Peer
  1678  	tester *downloadTester
  1679  }
  1680  
  1681  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1682  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1683  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1684  }
  1685  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1686  	return ftp.peer.RequestBodies(hashes)
  1687  }
  1688  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1689  	return ftp.peer.RequestReceipts(hashes)
  1690  }
  1691  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1692  	return ftp.peer.RequestNodeData(hashes)
  1693  }
  1694  
  1695  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1696  	deliveriesDone := make(chan struct{}, 500)
  1697  	for i := 0; i < cap(deliveriesDone); i++ {
  1698  		peer := fmt.Sprintf("fake-peer%d", i)
  1699  		go func() {
  1700  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1701  			deliveriesDone <- struct{}{}
  1702  		}()
  1703  	}
  1704  	// Deliver the actual requested headers.
  1705  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1706  	// None of the extra deliveries should block.
  1707  	timeout := time.After(15 * time.Second)
  1708  	for i := 0; i < cap(deliveriesDone); i++ {
  1709  		select {
  1710  		case <-deliveriesDone:
  1711  		case <-timeout:
  1712  			panic("blocked")
  1713  		}
  1714  	}
  1715  	return nil
  1716  }
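
// tryDeliver is a hypothetical sketch of the non-blocking hand-off pattern the
// flooding scenario above depends on: an unexpected delivery is either accepted
// by a waiting receiver or discarded, but the sender never blocks. It is not
// the downloader's actual delivery code.
func tryDeliver(ch chan<- []*types.Header, headers []*types.Header) bool {
	select {
	case ch <- headers:
		return true
	default:
		return false
	}
}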
  1717  
  1718  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1719  	t.Parallel()
  1720  
  1721  	master := newTester()
  1722  	defer master.terminate()
  1723  
  1724  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1725  	for i := 0; i < 200; i++ {
  1726  		tester := newTester()
  1727  		tester.peerDb = master.peerDb
  1728  
  1729  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1730  		// Whenever the downloader requests headers, flood it with
  1731  		// a lot of unrequested header deliveries.
  1732  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1733  			tester.downloader.peers.peers["peer"].peer,
  1734  			tester,
  1735  		}
  1736  
  1737  		if err := tester.sync("peer", nil, mode); err != nil {
  1738  			t.Errorf("sync failed: %v", err)
  1739  		}
  1740  		tester.terminate()
  1741  	}
  1742  }
  1743  
  1744  // Tests that if fast sync aborts in the critical section, it can restart a few
  1745  // times before giving up.
  1746  func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
  1747  func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
  1748  func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
  1749  func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
  1750  
  1751  func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
  1752  	tester := newTester()
  1753  	defer tester.terminate()
  1754  
  1755  	// Create a large enough blockchain to actually fast sync on
  1756  	targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
  1757  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1758  
  1759  	// Create a tester peer with a critical section header missing (force failures)
  1760  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1761  	delete(tester.peerHeaders["peer"], hashes[fsMinFullBlocks-1])
  1762  	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
  1763  
  1764  	// Remove all possible pivot state roots and slow down replies (test failure resets later)
  1765  	for i := 0; i < fsPivotInterval; i++ {
  1766  		tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
  1767  	}
  1768  	(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(500 * time.Millisecond) // Enough to reach the critical section
  1769  
  1770  	// Synchronise with the peer a few times and make sure the attempts fail until the retry limit
  1771  	for i := 0; i < int(fsCriticalTrials)-1; i++ {
  1772  		// Attempt a sync and ensure it fails properly
  1773  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1774  			t.Fatalf("failing fast sync succeeded")
  1775  		}
  1776  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1777  
  1778  		// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
  1779  		if i == 0 {
  1780  			if tester.downloader.fsPivotLock == nil {
  1781  				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
  1782  				t.Fatalf("pivot block not locked in after critical section failure")
  1783  			}
  1784  			tester.lock.Lock()
  1785  			tester.peerHeaders["peer"][hashes[fsMinFullBlocks-1]] = headers[hashes[fsMinFullBlocks-1]]
  1786  			tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
  1787  			(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(0)
  1788  			tester.lock.Unlock()
  1789  		}
  1790  	}
  1791  	// Return all nodes if we're testing fast sync progression
  1792  	if progress {
  1793  		tester.lock.Lock()
  1794  		tester.peerMissingStates["peer"] = map[common.Hash]bool{}
  1795  		tester.lock.Unlock()
  1796  
  1797  		if err := tester.sync("peer", nil, FastSync); err != nil {
  1798  			t.Fatalf("failed to synchronise blocks in progressed fast sync: %v", err)
  1799  		}
  1800  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1801  
  1802  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != 1 {
  1803  			t.Fatalf("progressed pivot trial count mismatch: have %v, want %v", fails, 1)
  1804  		}
  1805  		assertOwnChain(t, tester, targetBlocks+1)
  1806  	} else {
  1807  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1808  			t.Fatalf("succeeded to synchronise blocks in failed fast sync")
  1809  		}
  1810  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1811  
  1812  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != fsCriticalTrials {
  1813  			t.Fatalf("failed pivot trial count mismatch: have %v, want %v", fails, fsCriticalTrials)
  1814  		}
  1815  	}
  1816  	// Retry limit exhausted, downloader will switch to full sync, should succeed
  1817  	if err := tester.sync("peer", nil, FastSync); err != nil {
  1818  		t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
  1819  	}
  1820  	// Note, we can't assert the chain here because the test asserter assumes sync
  1821  	// completed using a single mode of operation, whereas fast-then-slow can result
  1822  	// in arbitrary intermediate state that's not cleanly verifiable.
  1823  }
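
// retryCritical is a hypothetical sketch of the control flow exercised by
// testFastCriticalRestarts: a critical-section fast sync is retried up to trials
// times and, once the budget is exhausted, the caller falls back to a full sync.
// It is an illustration only, not the downloader's actual retry logic.
func retryCritical(trials int, attempt func() error) error {
	var err error
	for i := 0; i < trials; i++ {
		if err = attempt(); err == nil {
			return nil
		}
	}
	return err
}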