github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/eth/downloader/downloader_test.go

     1  // Copyright 2015 The Spectrum Authors
     2  // This file is part of the Spectrum library.
     3  //
     4  // The Spectrum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The Spectrum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/SmartMeshFoundation/Spectrum/common"
    29  	"github.com/SmartMeshFoundation/Spectrum/consensus/ethash"
    30  	"github.com/SmartMeshFoundation/Spectrum/core"
    31  	"github.com/SmartMeshFoundation/Spectrum/core/state"
    32  	"github.com/SmartMeshFoundation/Spectrum/core/types"
    33  	"github.com/SmartMeshFoundation/Spectrum/crypto"
    34  	"github.com/SmartMeshFoundation/Spectrum/ethdb"
    35  	"github.com/SmartMeshFoundation/Spectrum/event"
    36  	"github.com/SmartMeshFoundation/Spectrum/params"
    37  	"github.com/SmartMeshFoundation/Spectrum/trie"
    38  )
    39  
    40  var (
    41  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    42  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    43  )
    44  
    45  // Reduce some of the parameters to make the tester faster.
    46  func init() {
    47  	MaxForkAncestry = uint64(10000)
    48  	blockCacheLimit = 1024
    49  	fsCriticalTrials = 10
    50  }
    51  
    52  // downloadTester is a test simulator for mocking out the local block chain.
    53  type downloadTester struct {
    54  	downloader *Downloader
    55  
    56  	genesis *types.Block   // Genesis block used by the tester and peers
    57  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    58  	peerDb  ethdb.Database // Database of the peers containing all data
    59  
    60  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    61  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    62  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    63  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    64  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    65  
    66  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    67  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    68  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    69  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    70  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    71  
    72  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    73  
    74  	lock sync.RWMutex
    75  }
    76  
    77  // newTester creates a new downloader test mocker.
    78  func newTester() *downloadTester {
    79  	testdb, _ := ethdb.NewMemDatabase()
    80  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    81  
    82  	tester := &downloadTester{
    83  		genesis:           genesis,
    84  		peerDb:            testdb,
    85  		ownHashes:         []common.Hash{genesis.Hash()},
    86  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    87  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    88  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    89  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    90  		peerHashes:        make(map[string][]common.Hash),
    91  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    92  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    93  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    94  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    95  		peerMissingStates: make(map[string]map[common.Hash]bool),
    96  	}
    97  	tester.stateDb, _ = ethdb.NewMemDatabase()
    98  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    99  
   100  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   101  
   102  	return tester
   103  }
   104  
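// A typical test below exercises this mock roughly as follows (illustrative
// sketch only; the concrete tests further down are authoritative):
//
//	tester := newTester()
//	defer tester.terminate()
//
//	hashes, headers, blocks, receipts := tester.makeChain(100, 0, tester.genesis, nil, false)
//	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}
//	assertOwnChain(t, tester, 101)
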
   105  // makeChain creates a chain of n blocks starting at and including parent.
   106  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   107  // contains a transaction and every 5th an uncle to allow testing correct block
   108  // reassembly.
   109  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   110  	// Generate the block chain
   111  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
   112  		block.SetCoinbase(common.Address{seed})
   113  
   114  		// If a heavy chain is requested, delay blocks to raise difficulty
   115  		if heavy {
   116  			block.OffsetTime(-1)
   117  		}
   118  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   119  		if parent == dl.genesis && i%3 == 0 {
   120  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   121  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), new(big.Int).SetUint64(params.TxGas), nil, nil), signer, testKey)
   122  			if err != nil {
   123  				panic(err)
   124  			}
   125  			block.AddTx(tx)
   126  		}
   127  		// If the block number is a multiple of 5, add a bonus uncle to the block
   128  		if i > 0 && i%5 == 0 {
   129  			block.AddUncle(&types.Header{
   130  				ParentHash: block.PrevBlock(i - 1).Hash(),
   131  				Number:     big.NewInt(block.Number().Int64() - 1),
   132  			})
   133  		}
   134  	})
   135  	// Convert the block-chain into a hash-chain and header/block maps
   136  	hashes := make([]common.Hash, n+1)
   137  	hashes[len(hashes)-1] = parent.Hash()
   138  
   139  	headerm := make(map[common.Hash]*types.Header, n+1)
   140  	headerm[parent.Hash()] = parent.Header()
   141  
   142  	blockm := make(map[common.Hash]*types.Block, n+1)
   143  	blockm[parent.Hash()] = parent
   144  
   145  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   146  	receiptm[parent.Hash()] = parentReceipts
   147  
   148  	for i, b := range blocks {
   149  		hashes[len(hashes)-i-2] = b.Hash()
   150  		headerm[b.Hash()] = b.Header()
   151  		blockm[b.Hash()] = b
   152  		receiptm[b.Hash()] = receipts[i]
   153  	}
   154  	return hashes, headerm, blockm, receiptm
   155  }
   156  
   157  // makeChainFork creates two chains of length n, such that h1[:f] and
   158  // h2[:f] are different but have a common suffix of length n-f.
   159  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   160  	// Create the common suffix
   161  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   162  
   163  	// Create the forks, making the second heavier if non-balanced forks were requested
   164  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   165  	hashes1 = append(hashes1, hashes[1:]...)
   166  
   167  	heavy := false
   168  	if !balanced {
   169  		heavy = true
   170  	}
   171  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   172  	hashes2 = append(hashes2, hashes[1:]...)
   173  
   174  	for hash, header := range headers {
   175  		headers1[hash] = header
   176  		headers2[hash] = header
   177  	}
   178  	for hash, block := range blocks {
   179  		blocks1[hash] = block
   180  		blocks2[hash] = block
   181  	}
   182  	for hash, receipt := range receipts {
   183  		receipts1[hash] = receipt
   184  		receipts2[hash] = receipt
   185  	}
   186  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   187  }
   188  
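// For example (illustrative only): makeChainFork(10, 3, parent, nil, true) builds a
// balanced pair of 10-block chains that share the 7 blocks built on top of parent,
// while the 3 newest blocks of each returned hash chain (h1[:3] and h2[:3]) diverge.
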
   189  // terminate aborts any operations on the embedded downloader and releases all
   190  // held resources.
   191  func (dl *downloadTester) terminate() {
   192  	dl.downloader.Terminate()
   193  }
   194  
   195  // sync starts synchronizing with a remote peer, blocking until it completes.
   196  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   197  	dl.lock.RLock()
   198  	hash := dl.peerHashes[id][0]
   199  	// If no particular TD was requested, load from the peer's blockchain
   200  	if td == nil {
   201  		td = big.NewInt(1)
   202  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   203  			td = diff
   204  		}
   205  	}
   206  	dl.lock.RUnlock()
   207  
   208  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   209  	err := dl.downloader.synchronise(id, hash, td, mode)
   210  	select {
   211  	case <-dl.downloader.cancelCh:
   212  		// Ok, downloader fully cancelled after sync cycle
   213  	default:
   214  		// Downloader is still accepting packets, which can block a peer indefinitely
   215  		panic("downloader active post sync cycle") // panic will be caught by tester
   216  	}
   217  	return err
   218  }
   219  
   220  // HasHeader checks if a header is present in the tester's canonical chain.
   221  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   222  	return dl.GetHeaderByHash(hash) != nil
   223  }
   224  
   225  // HasBlockAndState checks if a block and its associated state are present in the tester's canonical chain.
   226  func (dl *downloadTester) HasBlockAndState(hash common.Hash) bool {
   227  	block := dl.GetBlockByHash(hash)
   228  	if block == nil {
   229  		return false
   230  	}
   231  	_, err := dl.stateDb.Get(block.Root().Bytes())
   232  	return err == nil
   233  }
   234  
   235  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   236  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   237  	dl.lock.RLock()
   238  	defer dl.lock.RUnlock()
   239  
   240  	return dl.ownHeaders[hash]
   241  }
   242  
   243  // GetBlockByHash retrieves a block from the tester's canonical chain.
   244  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   245  	dl.lock.RLock()
   246  	defer dl.lock.RUnlock()
   247  
   248  	return dl.ownBlocks[hash]
   249  }
   250  
   251  // CurrentHeader retrieves the current head header from the canonical chain.
   252  func (dl *downloadTester) CurrentHeader() *types.Header {
   253  	dl.lock.RLock()
   254  	defer dl.lock.RUnlock()
   255  
   256  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   257  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   258  			return header
   259  		}
   260  	}
   261  	return dl.genesis.Header()
   262  }
   263  
   264  // CurrentBlock retrieves the current head block from the canonical chain.
   265  func (dl *downloadTester) CurrentBlock() *types.Block {
   266  	dl.lock.RLock()
   267  	defer dl.lock.RUnlock()
   268  
   269  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   270  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   271  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   272  				return block
   273  			}
   274  		}
   275  	}
   276  	return dl.genesis
   277  }
   278  
   279  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   280  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   281  	dl.lock.RLock()
   282  	defer dl.lock.RUnlock()
   283  
   284  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   285  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   286  			return block
   287  		}
   288  	}
   289  	return dl.genesis
   290  }
   291  
   292  // FastSyncCommitHead manually sets the head block to a given hash.
   293  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   294  	// For now only check that the state trie is correct
   295  	if block := dl.GetBlockByHash(hash); block != nil {
   296  		_, err := trie.NewSecure(block.Root(), dl.stateDb, 0)
   297  		return err
   298  	}
   299  	return fmt.Errorf("non existent block: %x", hash[:4])
   300  }
   301  
   302  // GetTdByHash retrieves the block's total difficulty from the canonical chain.
   303  func (dl *downloadTester) GetTdByHash(hash common.Hash) *big.Int {
   304  	dl.lock.RLock()
   305  	defer dl.lock.RUnlock()
   306  
   307  	return dl.ownChainTd[hash]
   308  }
   309  
   310  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   311  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   312  	dl.lock.Lock()
   313  	defer dl.lock.Unlock()
   314  
   315  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   316  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   317  		return 0, errors.New("unknown parent")
   318  	}
   319  	for i := 1; i < len(headers); i++ {
   320  		if headers[i].ParentHash != headers[i-1].Hash() {
   321  			return i, errors.New("unknown parent")
   322  		}
   323  	}
   324  	// Do a full insert if pre-checks passed
   325  	for i, header := range headers {
   326  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   327  			continue
   328  		}
   329  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   330  			return i, errors.New("unknown parent")
   331  		}
   332  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   333  		dl.ownHeaders[header.Hash()] = header
   334  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   335  	}
   336  	return len(headers), nil
   337  }
   338  
   339  // InsertChain injects a new batch of blocks into the simulated chain.
   340  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   341  	dl.lock.Lock()
   342  	defer dl.lock.Unlock()
   343  
   344  	for i, block := range blocks {
   345  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   346  			return i, errors.New("unknown parent")
   347  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   348  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   349  		}
   350  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   351  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   352  			dl.ownHeaders[block.Hash()] = block.Header()
   353  		}
   354  		dl.ownBlocks[block.Hash()] = block
   355  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   356  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   357  	}
   358  	return len(blocks), nil
   359  }
   360  
   361  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   362  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   363  	dl.lock.Lock()
   364  	defer dl.lock.Unlock()
   365  
   366  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   367  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   368  			return i, errors.New("unknown owner")
   369  		}
   370  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   371  			return i, errors.New("unknown parent")
   372  		}
   373  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   374  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   375  	}
   376  	return len(blocks), nil
   377  }
   378  
   379  // Rollback removes some recently added elements from the chain.
   380  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   381  	dl.lock.Lock()
   382  	defer dl.lock.Unlock()
   383  
   384  	for i := len(hashes) - 1; i >= 0; i-- {
   385  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   386  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   387  		}
   388  		delete(dl.ownChainTd, hashes[i])
   389  		delete(dl.ownHeaders, hashes[i])
   390  		delete(dl.ownReceipts, hashes[i])
   391  		delete(dl.ownBlocks, hashes[i])
   392  	}
   393  }
   394  
   395  // newPeer registers a new block download source into the downloader.
   396  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   397  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   398  }
   399  
   400  // newSlowPeer registers a new block download source into the downloader, with a
   401  // specific delay time on processing the network packets sent to it, simulating
   402  // potentially slow network IO.
   403  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   404  	dl.lock.Lock()
   405  	defer dl.lock.Unlock()
   406  
   407  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   408  	if err == nil {
   409  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   410  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   411  		copy(dl.peerHashes[id], hashes)
   412  
   413  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   414  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   415  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   416  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   417  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   418  
   419  		genesis := hashes[len(hashes)-1]
   420  		if header := headers[genesis]; header != nil {
   421  			dl.peerHeaders[id][genesis] = header
   422  			dl.peerChainTds[id][genesis] = header.Difficulty
   423  		}
   424  		if block := blocks[genesis]; block != nil {
   425  			dl.peerBlocks[id][genesis] = block
   426  			dl.peerChainTds[id][genesis] = block.Difficulty()
   427  		}
   428  
   429  		for i := len(hashes) - 2; i >= 0; i-- {
   430  			hash := hashes[i]
   431  
   432  			if header, ok := headers[hash]; ok {
   433  				dl.peerHeaders[id][hash] = header
   434  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   435  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   436  				}
   437  			}
   438  			if block, ok := blocks[hash]; ok {
   439  				dl.peerBlocks[id][hash] = block
   440  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   441  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   442  				}
   443  			}
   444  			if receipt, ok := receipts[hash]; ok {
   445  				dl.peerReceipts[id][hash] = receipt
   446  			}
   447  		}
   448  	}
   449  	return err
   450  }
   451  
   452  // dropPeer simulates a hard peer removal from the connection pool.
   453  func (dl *downloadTester) dropPeer(id string) {
   454  	dl.lock.Lock()
   455  	defer dl.lock.Unlock()
   456  
   457  	delete(dl.peerHashes, id)
   458  	delete(dl.peerHeaders, id)
   459  	delete(dl.peerBlocks, id)
   460  	delete(dl.peerChainTds, id)
   461  
   462  	dl.downloader.UnregisterPeer(id)
   463  }
   464  
   465  type downloadTesterPeer struct {
   466  	dl    *downloadTester
   467  	id    string
   468  	delay time.Duration
   469  	lock  sync.RWMutex
   470  }
   471  
   472  // setDelay is a thread-safe setter for the network delay value.
   473  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   474  	dlp.lock.Lock()
   475  	defer dlp.lock.Unlock()
   476  
   477  	dlp.delay = delay
   478  }
   479  
   480  // waitDelay is a thread-safe way to sleep for the configured time.
   481  func (dlp *downloadTesterPeer) waitDelay() {
   482  	dlp.lock.RLock()
   483  	delay := dlp.delay
   484  	dlp.lock.RUnlock()
   485  
   486  	time.Sleep(delay)
   487  }
   488  
   489  // Head retrieves the peer's current head hash
   490  // and total difficulty.
   491  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   492  	dlp.dl.lock.RLock()
   493  	defer dlp.dl.lock.RUnlock()
   494  
   495  	return dlp.dl.peerHashes[dlp.id][0], nil
   496  }
   497  
   498  // RequestHeadersByHash serves a GetBlockHeaders request based on a hashed
   499  // origin, associated with a particular peer in the download tester, delivering
   500  // a batch of headers from that peer back to the downloader.
   501  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   502  	// Find the canonical number of the hash
   503  	dlp.dl.lock.RLock()
   504  	number := uint64(0)
   505  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   506  		if hash == origin {
   507  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   508  			break
   509  		}
   510  	}
   511  	dlp.dl.lock.RUnlock()
   512  
   513  	// Use the absolute header fetcher to satisfy the query
   514  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   515  }
   516  
   517  // RequestHeadersByNumber serves a GetBlockHeaders request based on a numbered
   518  // origin, associated with a particular peer in the download tester, delivering
   519  // a batch of headers from that peer back to the downloader.
   520  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   521  	dlp.waitDelay()
   522  
   523  	dlp.dl.lock.RLock()
   524  	defer dlp.dl.lock.RUnlock()
   525  
   526  	// Gather the next batch of headers
   527  	hashes := dlp.dl.peerHashes[dlp.id]
   528  	headers := dlp.dl.peerHeaders[dlp.id]
   529  	result := make([]*types.Header, 0, amount)
   530  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   531  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   532  			result = append(result, header)
   533  		}
   534  	}
   535  	// Delay delivery a bit to allow attacks to unfold
   536  	go func() {
   537  		time.Sleep(time.Millisecond)
   538  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   539  	}()
   540  	return nil
   541  }
   542  
   543  // RequestBodies serves a getBlockBodies request associated with a particular
   544  // peer in the download tester, delivering a batch of block bodies from that
   545  // peer back to the downloader.
   546  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   547  	dlp.waitDelay()
   548  
   549  	dlp.dl.lock.RLock()
   550  	defer dlp.dl.lock.RUnlock()
   551  
   552  	blocks := dlp.dl.peerBlocks[dlp.id]
   553  
   554  	transactions := make([][]*types.Transaction, 0, len(hashes))
   555  	uncles := make([][]*types.Header, 0, len(hashes))
   556  
   557  	for _, hash := range hashes {
   558  		if block, ok := blocks[hash]; ok {
   559  			transactions = append(transactions, block.Transactions())
   560  			uncles = append(uncles, block.Uncles())
   561  		}
   562  	}
   563  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   564  
   565  	return nil
   566  }
   567  
   568  // RequestReceipts serves a getReceipts request associated with a particular
   569  // peer in the download tester, delivering a batch of block receipts from that
   570  // peer back to the downloader.
   571  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   572  	dlp.waitDelay()
   573  
   574  	dlp.dl.lock.RLock()
   575  	defer dlp.dl.lock.RUnlock()
   576  
   577  	receipts := dlp.dl.peerReceipts[dlp.id]
   578  
   579  	results := make([][]*types.Receipt, 0, len(hashes))
   580  	for _, hash := range hashes {
   581  		if receipt, ok := receipts[hash]; ok {
   582  			results = append(results, receipt)
   583  		}
   584  	}
   585  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   586  
   587  	return nil
   588  }
   589  
   590  // RequestNodeData serves a getNodeData request associated with a particular
   591  // peer in the download tester, delivering a batch of trie node data from that
   592  // peer back to the downloader.
   593  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   594  	dlp.waitDelay()
   595  
   596  	dlp.dl.lock.RLock()
   597  	defer dlp.dl.lock.RUnlock()
   598  
   599  	results := make([][]byte, 0, len(hashes))
   600  	for _, hash := range hashes {
   601  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   602  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   603  				results = append(results, data)
   604  			}
   605  		}
   606  	}
   607  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   608  
   609  	return nil
   610  }
   611  
   612  // assertOwnChain checks if the local chain contains the correct number of items
   613  // of the various chain components.
   614  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   615  	assertOwnForkedChain(t, tester, 1, []int{length})
   616  }
   617  
   618  // assertOwnForkedChain checks if the local forked chain contains the correct
   619  // number of items of the various chain components.
   620  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   621  	// Initialize the counters for the first fork
   622  	headers, blocks := lengths[0], lengths[0]
   623  
   624  	minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks
   625  	if minReceipts < 0 {
   626  		minReceipts = 1
   627  	}
   628  	if maxReceipts < 0 {
   629  		maxReceipts = 1
   630  	}
   631  	// Update the counters for each subsequent fork
   632  	for _, length := range lengths[1:] {
   633  		headers += length - common
   634  		blocks += length - common
   635  
   636  		minReceipts += length - common - fsMinFullBlocks - fsPivotInterval
   637  		maxReceipts += length - common - fsMinFullBlocks
   638  	}
   639  	switch tester.downloader.mode {
   640  	case FullSync:
   641  		minReceipts, maxReceipts = 1, 1
   642  	case LightSync:
   643  		blocks, minReceipts, maxReceipts = 1, 1, 1
   644  	}
   645  	if hs := len(tester.ownHeaders); hs != headers {
   646  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   647  	}
   648  	if bs := len(tester.ownBlocks); bs != blocks {
   649  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   650  	}
   651  	if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts {
   652  		t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts)
   653  	}
   654  	// Verify the state trie too for fast syncs
   655  	if tester.downloader.mode == FastSync {
   656  		var index int
   657  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   658  			index = pivot
   659  		} else {
   660  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   661  		}
   662  		if index > 0 {
   663  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(tester.stateDb)); statedb == nil || err != nil {
   664  				t.Fatalf("state reconstruction failed: %v", err)
   665  			}
   666  		}
   667  	}
   668  }
   669  
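// As a rough worked example of the counting above (assuming a full sync, where the
// receipt bounds collapse to 1): common = 100 and lengths = {150, 150} gives an
// expected header and block count of 150 + (150 - 100) = 200 each.
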
   670  // Tests that simple synchronization against a canonical chain works correctly.
   671  // In this test common ancestor lookup should be short-circuited and not require
   672  // binary searching.
   673  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   674  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   675  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   676  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   677  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   678  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   679  
   680  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   681  	t.Parallel()
   682  
   683  	tester := newTester()
   684  	defer tester.terminate()
   685  
   686  	// Create a small enough block chain to download
   687  	targetBlocks := blockCacheLimit - 15
   688  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   689  
   690  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   691  
   692  	// Synchronise with the peer and make sure all relevant data was retrieved
   693  	if err := tester.sync("peer", nil, mode); err != nil {
   694  		t.Fatalf("failed to synchronise blocks: %v", err)
   695  	}
   696  	assertOwnChain(t, tester, targetBlocks+1)
   697  }
   698  
   699  // Tests that if a large batch of blocks is being downloaded, it is throttled
   700  // until the cached blocks are retrieved.
   701  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   702  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   703  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   704  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   705  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   706  
   707  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   708  	t.Parallel()
   709  	tester := newTester()
   710  	defer tester.terminate()
   711  
   712  	// Create a long block chain to download and the tester
   713  	targetBlocks := 8 * blockCacheLimit
   714  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   715  
   716  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   717  
   718  	// Wrap the importer to allow stepping
   719  	blocked, proceed := uint32(0), make(chan struct{})
   720  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   721  		atomic.StoreUint32(&blocked, uint32(len(results)))
   722  		<-proceed
   723  	}
   724  	// Start a synchronisation concurrently
   725  	errc := make(chan error)
   726  	go func() {
   727  		errc <- tester.sync("peer", nil, mode)
   728  	}()
   729  	// Iteratively take some blocks, always checking the retrieval count
   730  	for {
   731  		// Check the retrieval count synchronously (this is the reason for this ugly block)
   732  		tester.lock.RLock()
   733  		retrieved := len(tester.ownBlocks)
   734  		tester.lock.RUnlock()
   735  		if retrieved >= targetBlocks+1 {
   736  			break
   737  		}
   738  		// Wait a bit for sync to throttle itself
   739  		var cached, frozen int
   740  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   741  			time.Sleep(25 * time.Millisecond)
   742  
   743  			tester.lock.Lock()
   744  			tester.downloader.queue.lock.Lock()
   745  			cached = len(tester.downloader.queue.blockDonePool)
   746  			if mode == FastSync {
   747  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   748  					if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   749  						cached = receipts
   750  					}
   751  				}
   752  			}
   753  			frozen = int(atomic.LoadUint32(&blocked))
   754  			retrieved = len(tester.ownBlocks)
   755  			tester.downloader.queue.lock.Unlock()
   756  			tester.lock.Unlock()
   757  
   758  			if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 {
   759  				break
   760  			}
   761  		}
   762  		// Make sure we filled up the cache, then exhaust it
   763  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   764  
   765  		tester.lock.RLock()
   766  		retrieved = len(tester.ownBlocks)
   767  		tester.lock.RUnlock()
   768  		if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 {
   769  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1)
   770  		}
   771  		// Permit the blocked blocks to import
   772  		if atomic.LoadUint32(&blocked) > 0 {
   773  			atomic.StoreUint32(&blocked, uint32(0))
   774  			proceed <- struct{}{}
   775  		}
   776  	}
   777  	// Check that we haven't pulled more blocks than available
   778  	assertOwnChain(t, tester, targetBlocks+1)
   779  	if err := <-errc; err != nil {
   780  		t.Fatalf("block synchronization failed: %v", err)
   781  	}
   782  }
   783  
   784  // Tests that simple synchronization against a forked chain works correctly. In
   785  // this test common ancestor lookup should *not* be short-circuited, and a full
   786  // binary search should be executed.
   787  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   788  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   789  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   790  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   791  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   792  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   793  
   794  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   795  	t.Parallel()
   796  
   797  	tester := newTester()
   798  	defer tester.terminate()
   799  
   800  	// Create a long enough forked chain
   801  	common, fork := MaxHashFetch, 2*MaxHashFetch
   802  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   803  
   804  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   805  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   806  
   807  	// Synchronise with the peer and make sure all blocks were retrieved
   808  	if err := tester.sync("fork A", nil, mode); err != nil {
   809  		t.Fatalf("failed to synchronise blocks: %v", err)
   810  	}
   811  	assertOwnChain(t, tester, common+fork+1)
   812  
   813  	// Synchronise with the second peer and make sure that fork is pulled too
   814  	if err := tester.sync("fork B", nil, mode); err != nil {
   815  		t.Fatalf("failed to synchronise blocks: %v", err)
   816  	}
   817  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   818  }
   819  
   820  // Tests that synchronising against a much shorter but much heavier fork works
   821  // correctly and is not dropped.
   822  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   823  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   824  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   825  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   826  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   827  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   828  
   829  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   830  	t.Parallel()
   831  
   832  	tester := newTester()
   833  	defer tester.terminate()
   834  
   835  	// Create a long enough forked chain
   836  	common, fork := MaxHashFetch, 4*MaxHashFetch
   837  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   838  
   839  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   840  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   841  
   842  	// Synchronise with the peer and make sure all blocks were retrieved
   843  	if err := tester.sync("light", nil, mode); err != nil {
   844  		t.Fatalf("failed to synchronise blocks: %v", err)
   845  	}
   846  	assertOwnChain(t, tester, common+fork+1)
   847  
   848  	// Synchronise with the second peer and make sure that fork is pulled too
   849  	if err := tester.sync("heavy", nil, mode); err != nil {
   850  		t.Fatalf("failed to synchronise blocks: %v", err)
   851  	}
   852  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   853  }
   854  
   855  // Tests that chain forks are contained within a certain interval of the current
   856  // chain head, ensuring that malicious peers cannot waste resources by feeding
   857  // long dead chains.
   858  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   859  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   860  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   861  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   862  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   863  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   864  
   865  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   866  	t.Parallel()
   867  
   868  	tester := newTester()
   869  	defer tester.terminate()
   870  
   871  	// Create a long enough forked chain
   872  	common, fork := 13, int(MaxForkAncestry+17)
   873  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   874  
   875  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   876  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   877  
   878  	// Synchronise with the peer and make sure all blocks were retrieved
   879  	if err := tester.sync("original", nil, mode); err != nil {
   880  		t.Fatalf("failed to synchronise blocks: %v", err)
   881  	}
   882  	assertOwnChain(t, tester, common+fork+1)
   883  
   884  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   885  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   886  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   887  	}
   888  }
   889  
   890  // Tests that chain forks are contained within a certain interval of the current
   891  // chain head for short but heavy forks too. These are a bit special because they
   892  // take different ancestor lookup paths.
   893  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   894  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   895  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   896  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   897  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   898  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   899  
   900  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   901  	t.Parallel()
   902  
   903  	tester := newTester()
   904  	defer tester.terminate()
   905  
   906  	// Create a long enough forked chain
   907  	common, fork := 13, int(MaxForkAncestry+17)
   908  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   909  
   910  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   911  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   912  
   913  	// Synchronise with the peer and make sure all blocks were retrieved
   914  	if err := tester.sync("original", nil, mode); err != nil {
   915  		t.Fatalf("failed to synchronise blocks: %v", err)
   916  	}
   917  	assertOwnChain(t, tester, common+fork+1)
   918  
   919  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   920  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   921  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   922  	}
   923  }
   924  
   925  // Tests that an inactive downloader will not accept incoming block headers and
   926  // bodies.
   927  func TestInactiveDownloader62(t *testing.T) {
   928  	t.Parallel()
   929  
   930  	tester := newTester()
   931  	defer tester.terminate()
   932  
   933  	// Check that neither block headers nor bodies are accepted
   934  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   935  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   936  	}
   937  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   938  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   939  	}
   940  }
   941  
   942  // Tests that an inactive downloader will not accept incoming block headers,
   943  // bodies and receipts.
   944  func TestInactiveDownloader63(t *testing.T) {
   945  	t.Parallel()
   946  
   947  	tester := newTester()
   948  	defer tester.terminate()
   949  
   950  	// Check that neither block headers, bodies nor receipts are accepted
   951  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   952  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   953  	}
   954  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   955  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   956  	}
   957  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   958  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   959  	}
   960  }
   961  
   962  // Tests that a canceled download wipes all previously accumulated state.
   963  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   964  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   965  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   966  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   967  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   968  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   969  
   970  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   971  	t.Parallel()
   972  
   973  	tester := newTester()
   974  	defer tester.terminate()
   975  
   976  	// Create a small enough block chain to download and the tester
   977  	targetBlocks := blockCacheLimit - 15
   978  	if targetBlocks >= MaxHashFetch {
   979  		targetBlocks = MaxHashFetch - 15
   980  	}
   981  	if targetBlocks >= MaxHeaderFetch {
   982  		targetBlocks = MaxHeaderFetch - 15
   983  	}
   984  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   985  
   986  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   987  
   988  	// Make sure canceling works with a pristine downloader
   989  	tester.downloader.Cancel()
   990  	if !tester.downloader.queue.Idle() {
   991  		t.Errorf("download queue not idle")
   992  	}
   993  	// Synchronise with the peer, but cancel afterwards
   994  	if err := tester.sync("peer", nil, mode); err != nil {
   995  		t.Fatalf("failed to synchronise blocks: %v", err)
   996  	}
   997  	tester.downloader.Cancel()
   998  	if !tester.downloader.queue.Idle() {
   999  		t.Errorf("download queue not idle")
  1000  	}
  1001  }
  1002  
  1003  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
  1004  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
  1005  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
  1006  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
  1007  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
  1008  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
  1009  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
  1010  
  1011  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
  1012  	t.Parallel()
  1013  
  1014  	tester := newTester()
  1015  	defer tester.terminate()
  1016  
  1017  	// Create various peers with various parts of the chain
  1018  	targetPeers := 8
  1019  	targetBlocks := targetPeers*blockCacheLimit - 15
  1020  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1021  
  1022  	for i := 0; i < targetPeers; i++ {
  1023  		id := fmt.Sprintf("peer #%d", i)
  1024  		tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
  1025  	}
  1026  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1027  		t.Fatalf("failed to synchronise blocks: %v", err)
  1028  	}
  1029  	assertOwnChain(t, tester, targetBlocks+1)
  1030  }
  1031  
  1032  // Tests that synchronisations behave well in multi-version protocol environments
  1033  // and do not wreak havoc on other nodes in the network.
  1034  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1035  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1036  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1037  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1038  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1039  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1040  
  1041  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1042  	t.Parallel()
  1043  
  1044  	tester := newTester()
  1045  	defer tester.terminate()
  1046  
  1047  	// Create a small enough block chain to download
  1048  	targetBlocks := blockCacheLimit - 15
  1049  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1050  
  1051  	// Create peers of every type
  1052  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1053  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1054  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1055  
  1056  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1057  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1058  		t.Fatalf("failed to synchronise blocks: %v", err)
  1059  	}
  1060  	assertOwnChain(t, tester, targetBlocks+1)
  1061  
  1062  	// Check that no peers have been dropped off
  1063  	for _, version := range []int{62, 63, 64} {
  1064  		peer := fmt.Sprintf("peer %d", version)
  1065  		if _, ok := tester.peerHashes[peer]; !ok {
  1066  			t.Errorf("%s dropped", peer)
  1067  		}
  1068  	}
  1069  }
  1070  
  1071  // Tests that if a block is empty (e.g. header only), no body request should be
  1072  // made, and instead the header should be assembled into a whole block by itself.
  1073  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1074  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1075  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1076  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1077  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1078  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1079  
  1080  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1081  	t.Parallel()
  1082  
  1083  	tester := newTester()
  1084  	defer tester.terminate()
  1085  
  1086  	// Create a block chain to download
  1087  	targetBlocks := 2*blockCacheLimit - 15
  1088  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1089  
  1090  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1091  
  1092  	// Instrument the downloader to signal body requests
  1093  	bodiesHave, receiptsHave := int32(0), int32(0)
  1094  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1095  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1096  	}
  1097  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1098  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1099  	}
  1100  	// Synchronise with the peer and make sure all blocks were retrieved
  1101  	if err := tester.sync("peer", nil, mode); err != nil {
  1102  		t.Fatalf("failed to synchronise blocks: %v", err)
  1103  	}
  1104  	assertOwnChain(t, tester, targetBlocks+1)
  1105  
  1106  	// Validate the number of block bodies that should have been requested
  1107  	bodiesNeeded, receiptsNeeded := 0, 0
  1108  	for _, block := range blocks {
  1109  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1110  			bodiesNeeded++
  1111  		}
  1112  	}
  1113  	for hash, receipt := range receipts {
  1114  		if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot {
  1115  			receiptsNeeded++
  1116  		}
  1117  	}
  1118  	if int(bodiesHave) != bodiesNeeded {
  1119  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1120  	}
  1121  	if int(receiptsHave) != receiptsNeeded {
  1122  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1123  	}
  1124  }
  1125  
  1126  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1127  // stalling the downloader by feeding gapped header chains.
  1128  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1129  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1130  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1131  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1132  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1133  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1134  
  1135  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1136  	t.Parallel()
  1137  
  1138  	tester := newTester()
  1139  	defer tester.terminate()
  1140  
  1141  	// Create a small enough block chain to download
  1142  	targetBlocks := blockCacheLimit - 15
  1143  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1144  
  1145  	// Attempt a full sync with an attacker feeding gapped headers
  1146  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1147  	missing := targetBlocks / 2
  1148  	delete(tester.peerHeaders["attack"], hashes[missing])
  1149  
  1150  	if err := tester.sync("attack", nil, mode); err == nil {
  1151  		t.Fatalf("succeeded attacker synchronisation")
  1152  	}
  1153  	// Synchronise with the valid peer and make sure sync succeeds
  1154  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1155  	if err := tester.sync("valid", nil, mode); err != nil {
  1156  		t.Fatalf("failed to synchronise blocks: %v", err)
  1157  	}
  1158  	assertOwnChain(t, tester, targetBlocks+1)
  1159  }
  1160  
  1161  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1162  // detects the invalid numbering.
  1163  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1164  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1165  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1166  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1167  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1168  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1169  
  1170  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1171  	t.Parallel()
  1172  
  1173  	tester := newTester()
  1174  	defer tester.terminate()
  1175  
  1176  	// Create a small enough block chain to download
  1177  	targetBlocks := blockCacheLimit - 15
  1178  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1179  
  1180  	// Attempt a full sync with an attacker feeding shifted headers
  1181  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1182  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1183  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1184  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1185  
  1186  	if err := tester.sync("attack", nil, mode); err == nil {
  1187  		t.Fatalf("succeeded attacker synchronisation")
  1188  	}
  1189  	// Synchronise with the valid peer and make sure sync succeeds
  1190  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1191  	if err := tester.sync("valid", nil, mode); err != nil {
  1192  		t.Fatalf("failed to synchronise blocks: %v", err)
  1193  	}
  1194  	assertOwnChain(t, tester, targetBlocks+1)
  1195  }
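
// Illustrative sketch, not part of the original test file: the shifted-header
// attack above works because the queue checks that a delivered header batch
// is numbered contiguously from the requested origin. A hypothetical helper
// expressing that check (the name and signature are assumptions, not the
// downloader's real queue API) could look like this.
func checkHeaderContiguity(from uint64, headers []*types.Header) error {
	for i, header := range headers {
		// Each header must sit exactly one number above its predecessor,
		// starting at the requested origin.
		if want := from + uint64(i); header.Number.Uint64() != want {
			return fmt.Errorf("non-contiguous header: have %d, want %d", header.Number.Uint64(), want)
		}
	}
	return nil
}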
  1196  
  1197  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1198  // for various failure scenarios. Afterwards a full sync is attempted to make
  1199  // sure no state was corrupted.
  1200  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1201  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1202  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1203  
  1204  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1205  	t.Parallel()
  1206  
  1207  	tester := newTester()
  1208  	defer tester.terminate()
  1209  
  1210  	// Create a small enough block chain to download
  1211  	targetBlocks := 3*fsHeaderSafetyNet + fsPivotInterval + fsMinFullBlocks
  1212  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1213  
  1214  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1215  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1216  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1217  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1218  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1219  
  1220  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1221  		t.Fatalf("succeeded fast attacker synchronisation")
  1222  	}
  1223  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1224  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1225  	}
  1226  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1227  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1228  	// rolled back, and also the pivot point being reverted to a non-block status.
  1229  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1230  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1231  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1232  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1233  
  1234  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1235  		t.Fatalf("succeeded block attacker synchronisation")
  1236  	}
  1237  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1238  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1239  	}
  1240  	if mode == FastSync {
  1241  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1242  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1243  		}
  1244  	}
  1245  	// Attempt to sync with an attacker that withholds promised blocks after the
  1246  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1247  	// but already imported pivot block.
  1248  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1249  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1250  
  1251  	tester.downloader.fsPivotFails = 0
  1252  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1253  		for i := missing; i <= len(hashes); i++ {
  1254  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1255  		}
  1256  		tester.downloader.syncInitHook = nil
  1257  	}
  1258  
  1259  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1260  		t.Fatalf("succeeded withholding attacker synchronisation")
  1261  	}
  1262  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1263  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1264  	}
  1265  	if mode == FastSync {
  1266  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1267  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1268  		}
  1269  	}
  1270  	tester.downloader.fsPivotFails = fsCriticalTrials
  1271  
  1272  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1273  	// rollback should also disable fast syncing for this process, verify that we
  1274  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1275  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1276  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1277  	if err := tester.sync("valid", nil, mode); err != nil {
  1278  		t.Fatalf("failed to synchronise blocks: %v", err)
  1279  	}
  1280  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1281  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1282  	}
  1283  	if mode != LightSync {
  1284  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1285  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1286  		}
  1287  	}
  1288  }
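
// Illustrative sketch, not the downloader's actual rollback code: the tests
// above rely on the downloader remembering the numbers of recently imported
// headers (bounded by fsHeaderSafetyNet) and unwinding past all of them once
// junk is detected. The type and method names below are assumptions for
// clarity only.
type rollbackBuffer struct {
	limit   int      // maximum number of suspect headers to remember
	numbers []uint64 // numbers of the most recently imported headers
}

// remember records an imported header number, keeping only the newest entries.
func (rb *rollbackBuffer) remember(number uint64) {
	rb.numbers = append(rb.numbers, number)
	if len(rb.numbers) > rb.limit {
		rb.numbers = rb.numbers[len(rb.numbers)-rb.limit:]
	}
}

// unwind rewinds the chain head to just below the oldest remembered header,
// so that none of the potentially bad headers survive the rollback.
func (rb *rollbackBuffer) unwind(setHead func(uint64)) {
	if len(rb.numbers) == 0 {
		return
	}
	target := uint64(0)
	if rb.numbers[0] > 0 {
		target = rb.numbers[0] - 1
	}
	setHead(target)
	rb.numbers = nil
}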
  1289  
  1290  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1291  // afterwards by not sending any useful hashes.
  1292  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1293  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1294  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1295  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1296  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1297  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1298  
  1299  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1300  	t.Parallel()
  1301  
  1302  	tester := newTester()
  1303  	defer tester.terminate()
  1304  
  1305  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1306  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1307  
  1308  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1309  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1310  	}
  1311  }
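
// Illustrative sketch, not part of the original test file: the starvation
// attack only works if a stalling peer is kept around, so callers are
// expected to drop a peer whose sync ends in errStallingPeer. The helper
// below is a hypothetical expression of that reaction.
func dropOnStall(id string, err error, drop func(string)) {
	// A peer that advertised a huge TD but never delivered useful data is
	// removed so it cannot pin the node to its inflated chain again.
	if err == errStallingPeer {
		drop(id)
	}
}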
  1312  
  1313  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1314  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1315  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1316  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1317  
  1318  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1319  	t.Parallel()
  1320  
  1321  	// Define the disconnection requirement for individual hash fetch errors
  1322  	tests := []struct {
  1323  		result error
  1324  		drop   bool
  1325  	}{
  1326  		{nil, false},                        // Sync succeeded, all is well
  1327  		{errBusy, false},                    // Sync is already in progress, no problem
  1328  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1329  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1330  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1331  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1332  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1333  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1334  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1335  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1336  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1337  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1338  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1339  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1340  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1341  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1342  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1343  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1344  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1345  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1346  	}
  1347  	// Run the tests and check disconnection status
  1348  	tester := newTester()
  1349  	defer tester.terminate()
  1350  
  1351  	for i, tt := range tests {
  1352  		// Register a new peer and ensure its presence
  1353  		id := fmt.Sprintf("test %d", i)
  1354  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1355  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1356  		}
  1357  		if _, ok := tester.peerHashes[id]; !ok {
  1358  			t.Fatalf("test %d: registered peer not found", i)
  1359  		}
  1360  		// Simulate a synchronisation and check the required result
  1361  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1362  
  1363  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1364  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1365  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1366  		}
  1367  	}
  1368  }
  1369  
  1370  // Tests that synchronisation progress (origin block number, current block number
  1371  // and highest block number) is tracked and updated correctly.
  1372  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1373  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1374  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1375  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1376  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1377  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1378  
  1379  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1380  	t.Parallel()
  1381  
  1382  	tester := newTester()
  1383  	defer tester.terminate()
  1384  
  1385  	// Create a small enough block chain to download
  1386  	targetBlocks := blockCacheLimit - 15
  1387  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1388  
  1389  	// Set a sync init hook to catch progress changes
  1390  	starting := make(chan struct{})
  1391  	progress := make(chan struct{})
  1392  
  1393  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1394  		starting <- struct{}{}
  1395  		<-progress
  1396  	}
  1397  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1398  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1399  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1400  	}
  1401  	// Synchronise half the blocks and check initial progress
  1402  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1403  	pending := new(sync.WaitGroup)
  1404  	pending.Add(1)
  1405  
  1406  	go func() {
  1407  		defer pending.Done()
  1408  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1409  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1410  		}
  1411  	}()
  1412  	<-starting
  1413  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1414  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1415  	}
  1416  	progress <- struct{}{}
  1417  	pending.Wait()
  1418  
  1419  	// Synchronise all the blocks and check continuation progress
  1420  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1421  	pending.Add(1)
  1422  
  1423  	go func() {
  1424  		defer pending.Done()
  1425  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1426  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1427  		}
  1428  	}()
  1429  	<-starting
  1430  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1431  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1432  	}
  1433  	progress <- struct{}{}
  1434  	pending.Wait()
  1435  
  1436  	// Check final progress after successful sync
  1437  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1438  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1439  	}
  1440  }
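
// Illustrative sketch, not part of the downloader API: the three counters
// checked above (starting, current and highest block) are typically combined
// into a completion ratio by whatever surfaces sync progress to users. The
// helper name and shape are assumptions for clarity only.
func syncPercent(starting, current, highest uint64) float64 {
	// Guard against a head that hasn't been announced yet (or a sync that is
	// already at its target), where the span below would be zero or negative.
	if highest <= starting {
		return 100
	}
	return 100 * float64(current-starting) / float64(highest-starting)
}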
  1441  
  1442  // Tests that synchronisation progress (origin block number and highest block
  1443  // number) is tracked and updated correctly in case of a fork (or manual head
  1444  // reversal).
  1445  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1446  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1447  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1448  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1449  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1450  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1451  
  1452  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1453  	t.Parallel()
  1454  
  1455  	tester := newTester()
  1456  	defer tester.terminate()
  1457  
  1458  	// Create a forked chain to simulate origin revertal
  1459  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1460  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1461  
  1462  	// Set a sync init hook to catch progress changes
  1463  	starting := make(chan struct{})
  1464  	progress := make(chan struct{})
  1465  
  1466  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1467  		starting <- struct{}{}
  1468  		<-progress
  1469  	}
  1470  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1471  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1472  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1473  	}
  1474  	// Synchronise with one of the forks and check progress
  1475  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1476  	pending := new(sync.WaitGroup)
  1477  	pending.Add(1)
  1478  
  1479  	go func() {
  1480  		defer pending.Done()
  1481  		if err := tester.sync("fork A", nil, mode); err != nil {
  1482  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1483  		}
  1484  	}()
  1485  	<-starting
  1486  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1487  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1488  	}
  1489  	progress <- struct{}{}
  1490  	pending.Wait()
  1491  
  1492  	// Simulate a successful sync above the fork
  1493  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1494  
  1495  	// Synchronise with the second fork and check progress resets
  1496  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1497  	pending.Add(1)
  1498  
  1499  	go func() {
  1500  		defer pending.Done()
  1501  		if err := tester.sync("fork B", nil, mode); err != nil {
  1502  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1503  		}
  1504  	}()
  1505  	<-starting
  1506  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1507  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1508  	}
  1509  	progress <- struct{}{}
  1510  	pending.Wait()
  1511  
  1512  	// Check final progress after successful sync
  1513  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1514  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1515  	}
  1516  }
  1517  
  1518  // Tests that if synchronisation is aborted due to some failure, then the progress
  1519  // origin is not updated in the next sync cycle, as it should be considered the
  1520  // continuation of the previous sync and not a new instance.
  1521  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1522  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1523  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1524  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1525  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1526  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1527  
  1528  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1529  	t.Parallel()
  1530  
  1531  	tester := newTester()
  1532  	defer tester.terminate()
  1533  
  1534  	// Create a small enough block chain to download
  1535  	targetBlocks := blockCacheLimit - 15
  1536  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1537  
  1538  	// Set a sync init hook to catch progress changes
  1539  	starting := make(chan struct{})
  1540  	progress := make(chan struct{})
  1541  
  1542  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1543  		starting <- struct{}{}
  1544  		<-progress
  1545  	}
  1546  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1547  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1548  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1549  	}
  1550  	// Attempt a full sync with a faulty peer
  1551  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1552  	missing := targetBlocks / 2
  1553  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1554  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1555  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1556  
  1557  	pending := new(sync.WaitGroup)
  1558  	pending.Add(1)
  1559  
  1560  	go func() {
  1561  		defer pending.Done()
  1562  		if err := tester.sync("faulty", nil, mode); err == nil {
  1563  			panic("succeeded faulty synchronisation")
  1564  		}
  1565  	}()
  1566  	<-starting
  1567  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1568  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1569  	}
  1570  	progress <- struct{}{}
  1571  	pending.Wait()
  1572  
  1573  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1574  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1575  	pending.Add(1)
  1576  
  1577  	go func() {
  1578  		defer pending.Done()
  1579  		if err := tester.sync("valid", nil, mode); err != nil {
  1580  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1581  		}
  1582  	}()
  1583  	<-starting
  1584  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1585  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1586  	}
  1587  	progress <- struct{}{}
  1588  	pending.Wait()
  1589  
  1590  	// Check final progress after successful sync
  1591  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1592  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1593  	}
  1594  }
  1595  
  1596  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1597  // the progress height is successfully reduced at the next sync invocation.
  1598  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1599  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1600  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1601  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1602  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1603  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1604  
  1605  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1606  	t.Parallel()
  1607  
  1608  	tester := newTester()
  1609  	defer tester.terminate()
  1610  
  1611  	// Create a small block chain
  1612  	targetBlocks := blockCacheLimit - 15
  1613  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1614  
  1615  	// Set a sync init hook to catch progress changes
  1616  	starting := make(chan struct{})
  1617  	progress := make(chan struct{})
  1618  
  1619  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1620  		starting <- struct{}{}
  1621  		<-progress
  1622  	}
  1623  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1624  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1625  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1626  	}
  1627  	// Create and sync with an attacker that promises a higher chain than available
  1628  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1629  	for i := 1; i < 3; i++ {
  1630  		delete(tester.peerHeaders["attack"], hashes[i])
  1631  		delete(tester.peerBlocks["attack"], hashes[i])
  1632  		delete(tester.peerReceipts["attack"], hashes[i])
  1633  	}
  1634  
  1635  	pending := new(sync.WaitGroup)
  1636  	pending.Add(1)
  1637  
  1638  	go func() {
  1639  		defer pending.Done()
  1640  		if err := tester.sync("attack", nil, mode); err == nil {
  1641  			panic("succeeded attacker synchronisation")
  1642  		}
  1643  	}()
  1644  	<-starting
  1645  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1646  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1647  	}
  1648  	progress <- struct{}{}
  1649  	pending.Wait()
  1650  
  1651  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1652  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1653  	pending.Add(1)
  1654  
  1655  	go func() {
  1656  		defer pending.Done()
  1657  		if err := tester.sync("valid", nil, mode); err != nil {
  1658  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1659  		}
  1660  	}()
  1661  	<-starting
  1662  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1663  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1664  	}
  1665  	progress <- struct{}{}
  1666  	pending.Wait()
  1667  
  1668  	// Check final progress after successful sync
  1669  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1670  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1671  	}
  1672  }
  1673  
  1674  // This test reproduces an issue where unexpected deliveries would
  1675  // block indefinitely if they arrived at the right time.
  1676  // We use data-driven subtests to manage this so that it runs in parallel on its own
  1677  // and not with the other tests, avoiding intermittent failures.
  1678  func TestDeliverHeadersHang(t *testing.T) {
  1679  	testCases := []struct {
  1680  		protocol int
  1681  		syncMode SyncMode
  1682  	}{
  1683  		{62, FullSync},
  1684  		{63, FullSync},
  1685  		{63, FastSync},
  1686  		{64, FullSync},
  1687  		{64, FastSync},
  1688  		{64, LightSync},
  1689  	}
  1690  	for _, tc := range testCases {
  1691  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1692  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1693  		})
  1694  	}
  1695  }
  1696  
  1697  type floodingTestPeer struct {
  1698  	peer   Peer
  1699  	tester *downloadTester
  1700  }
  1701  
  1702  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1703  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1704  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1705  }
  1706  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1707  	return ftp.peer.RequestBodies(hashes)
  1708  }
  1709  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1710  	return ftp.peer.RequestReceipts(hashes)
  1711  }
  1712  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1713  	return ftp.peer.RequestNodeData(hashes)
  1714  }
  1715  
  1716  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1717  	deliveriesDone := make(chan struct{}, 500)
  1718  	for i := 0; i < cap(deliveriesDone); i++ {
  1719  		peer := fmt.Sprintf("fake-peer%d", i)
  1720  		go func() {
  1721  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1722  			deliveriesDone <- struct{}{}
  1723  		}()
  1724  	}
  1725  	// Deliver the actual requested headers.
  1726  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1727  	// None of the extra deliveries should block.
  1728  	timeout := time.After(60 * time.Second)
  1729  	for i := 0; i < cap(deliveriesDone); i++ {
  1730  		select {
  1731  		case <-deliveriesDone:
  1732  		case <-timeout:
  1733  			panic("blocked")
  1734  		}
  1735  	}
  1736  	return nil
  1737  }
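
// Illustrative sketch, assumed shape rather than the downloader's real code:
// the hang reproduced above comes from a delivery handing its packet to the
// sync loop over a channel. Pairing that send with a cancel channel keeps an
// unexpected delivery from blocking forever; the names below are hypothetical.
func deliverOrCancel(dest chan<- []*types.Header, packet []*types.Header, cancel <-chan struct{}) error {
	select {
	case dest <- packet:
		// The sync loop accepted the packet.
		return nil
	case <-cancel:
		// Nobody is listening any more; bail out instead of blocking.
		return errors.New("delivery cancelled")
	}
}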
  1738  
  1739  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1740  	t.Parallel()
  1741  
  1742  	master := newTester()
  1743  	defer master.terminate()
  1744  
  1745  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1746  	for i := 0; i < 200; i++ {
  1747  		tester := newTester()
  1748  		tester.peerDb = master.peerDb
  1749  
  1750  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1751  		// Whenever the downloader requests headers, flood it with
  1752  		// a lot of unrequested header deliveries.
  1753  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1754  			tester.downloader.peers.peers["peer"].peer,
  1755  			tester,
  1756  		}
  1757  		if err := tester.sync("peer", nil, mode); err != nil {
  1758  			t.Errorf("sync failed: %v", err)
  1759  		}
  1760  		tester.terminate()
  1761  	}
  1762  }
  1763  
  1764  // Tests that if fast sync aborts in the critical section, it can restart a few
  1765  // times before giving up.
  1766  // We use data-driven subtests to manage this so that it runs in parallel on its own
  1767  // and not with the other tests, avoiding intermittent failures.
  1768  func TestFastCriticalRestarts(t *testing.T) {
  1769  	testCases := []struct {
  1770  		protocol int
  1771  		progress bool
  1772  	}{
  1773  		{63, false},
  1774  		{64, false},
  1775  		{63, true},
  1776  		{64, true},
  1777  	}
  1778  	for _, tc := range testCases {
  1779  		t.Run(fmt.Sprintf("protocol %d progress %v", tc.protocol, tc.progress), func(t *testing.T) {
  1780  			testFastCriticalRestarts(t, tc.protocol, tc.progress)
  1781  		})
  1782  	}
  1783  }
  1784  
  1785  func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
  1786  	t.Parallel()
  1787  
  1788  	tester := newTester()
  1789  	defer tester.terminate()
  1790  
  1791  	// Create a large enough blockchain to actually fast sync on
  1792  	targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
  1793  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1794  
  1795  	// Create a tester peer with a critical section header missing (force failures)
  1796  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1797  	delete(tester.peerHeaders["peer"], hashes[fsMinFullBlocks-1])
  1798  	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
  1799  
  1800  	// Remove all possible pivot state roots and slow down replies (test failure resets later)
  1801  	for i := 0; i < fsPivotInterval; i++ {
  1802  		tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
  1803  	}
  1804  	(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(500 * time.Millisecond) // Enough to reach the critical section
  1805  
  1806  	// Synchronise with the peer a few times and make sure they fail until the retry limit
  1807  	for i := 0; i < int(fsCriticalTrials)-1; i++ {
  1808  		// Attempt a sync and ensure it fails properly
  1809  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1810  			t.Fatalf("failing fast sync succeeded")
  1811  		}
  1812  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1813  
  1814  		// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
  1815  		if i == 0 {
  1816  			time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1817  			if tester.downloader.fsPivotLock == nil {
  1818  				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
  1819  				t.Fatalf("pivot block not locked in after critical section failure")
  1820  			}
  1821  			tester.lock.Lock()
  1822  			tester.peerHeaders["peer"][hashes[fsMinFullBlocks-1]] = headers[hashes[fsMinFullBlocks-1]]
  1823  			tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
  1824  			(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(0)
  1825  			tester.lock.Unlock()
  1826  		}
  1827  	}
  1828  	// Return all nodes if we're testing fast sync progression
  1829  	if progress {
  1830  		tester.lock.Lock()
  1831  		tester.peerMissingStates["peer"] = map[common.Hash]bool{}
  1832  		tester.lock.Unlock()
  1833  
  1834  		if err := tester.sync("peer", nil, FastSync); err != nil {
  1835  			t.Fatalf("failed to synchronise blocks in progressed fast sync: %v", err)
  1836  		}
  1837  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1838  
  1839  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != 1 {
  1840  			t.Fatalf("progressed pivot trial count mismatch: have %v, want %v", fails, 1)
  1841  		}
  1842  		assertOwnChain(t, tester, targetBlocks+1)
  1843  	} else {
  1844  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1845  			t.Fatalf("succeeded synchronising blocks in a fast sync expected to fail")
  1846  		}
  1847  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1848  
  1849  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != fsCriticalTrials {
  1850  			t.Fatalf("failed pivot trial count mismatch: have %v, want %v", fails, fsCriticalTrials)
  1851  		}
  1852  	}
  1853  	// Retry limit exhausted, downloader will switch to full sync, should succeed
  1854  	if err := tester.sync("peer", nil, FastSync); err != nil {
  1855  		t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
  1856  	}
  1857  	// Note, we can't assert the chain here because the test asserter assumes sync
  1858  	// completed using a single mode of operation, whereas fast-then-slow can result
  1859  	// in arbitrary intermediate state that's not cleanly verifiable.
  1860  }
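
// Illustrative sketch of the control flow exercised above, with assumed
// helper names rather than the downloader's real implementation: fast sync is
// retried while failures around the pivot accumulate, and once the trial
// budget (fsCriticalTrials in the real code) is exhausted the node falls back
// to a plain full sync.
func syncWithFallback(attempt func(SyncMode) error, trials uint32) error {
	for fails := uint32(0); fails < trials; fails++ {
		if err := attempt(FastSync); err == nil {
			return nil
		}
		// Each failed critical-section attempt consumes one trial.
	}
	// Retry budget exhausted: give up on fast sync and do a full sync instead.
	return attempt(FullSync)
}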