github.com/tuotoo/go-ethereum@v1.7.4-0.20171121184211-049797d40a24/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheLimit = 1024
	fsCriticalTrials = 10
}

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb, _ := ethdb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb, _ = ethdb.NewMemDatabase()
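	// Mark the genesis state as locally available by storing a dummy entry
	// under its root; HasBlockAndState and CurrentBlock below only check for
	// the presence of such an entry.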
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)

	return tester
}
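
// A typical test in this file drives the mock roughly as follows (a sketch of
// the pattern used by testCanonicalSynchronisation and friends below):
//
//	tester := newTester()
//	defer tester.terminate()
//	hashes, headers, blocks, receipts := tester.makeChain(10, 0, tester.genesis, nil, false)
//	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}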

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), new(big.Int).SetUint64(params.TxGas), nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash: block.PrevBlock(i - 1).Hash(),
				Number:     big.NewInt(block.Number().Int64() - 1),
			})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
	}
	return hashes, headerm, blockm, receiptm
}
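
// For example, makeChain(2, ...) returns hashes ordered [head, head's parent,
// parent], so hashes[len(hashes)-1] is always the parent block passed in.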

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if unbalanced forks were requested
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := false
	if !balanced {
		heavy = true
	}
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}
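
// Both returned hash chains are ordered head->genesis and share every entry
// at and below the fork point; only the first f entries of each chain diverge.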

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlockAndState checks if a block and its associated state are present in the tester's canonical chain.
func (dl *downloadTester) HasBlockAndState(hash common.Hash) bool {
	block := dl.GetBlockByHash(hash)
	if block == nil {
		return false
	}
	_, err := dl.stateDb.Get(block.Root().Bytes())
	return err == nil
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}
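
// Note: unlike CurrentFastBlock below, CurrentBlock only counts a block as the
// head if its state root is present in stateDb, approximating a full node's head.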

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), dl.stateDb, 0)
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTdByHash retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTdByHash(hash common.Hash) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
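		// Mark the block's state as present with a dummy entry under its root,
		// mirroring what newTester does for the genesis state root.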
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	err := dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
			dl.peerChainTds[id][genesis] = header.Difficulty
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
			dl.peerChainTds[id][genesis] = block.Difficulty()
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
		}
	}
	return err
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	delay time.Duration
	lock  sync.RWMutex
}

// setDelay is a thread-safe setter for the network delay value.
func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
	dlp.lock.Lock()
	defer dlp.lock.Unlock()

	dlp.delay = delay
}

// waitDelay is a thread-safe way to sleep for the configured time.
func (dlp *downloadTesterPeer) waitDelay() {
	dlp.lock.RLock()
	delay := dlp.delay
	dlp.lock.RUnlock()

	time.Sleep(delay)
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	return dlp.dl.peerHashes[dlp.id][0], nil
}

// RequestHeadersByHash simulates a GetBlockHeaders request for a hash-based
// origin, associated with a particular peer in the download tester. It can be
// used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	// Find the canonical number of the hash
	dlp.dl.lock.RLock()
	number := uint64(0)
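	// The peer's hash slice is ordered head -> genesis, so the canonical block
	// number of the entry at index num is len(hashes)-num-1.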
	for num, hash := range dlp.dl.peerHashes[dlp.id] {
		if hash == origin {
			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
			break
		}
	}
	dlp.dl.lock.RUnlock()

	// Use the absolute header fetcher to satisfy the query
	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
}

// RequestHeadersByNumber simulates a GetBlockHeaders request for a numbered
// origin, associated with a particular peer in the download tester. It can be
// used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	// Gather the next batch of headers
	hashes := dlp.dl.peerHashes[dlp.id]
	headers := dlp.dl.peerHeaders[dlp.id]
	result := make([]*types.Header, 0, amount)
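	// Since hashes run head -> genesis, block number N lives at index
	// len(hashes)-N-1; each step of skip+1 then advances towards the head.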
	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
			result = append(result, header)
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	go func() {
		time.Sleep(time.Millisecond)
		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	}()
	return nil
}

// RequestBodies simulates a getBlockBodies request associated with a particular
// peer in the download tester. It can be used to retrieve batches of block
// bodies from the requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	blocks := dlp.dl.peerBlocks[dlp.id]

	transactions := make([][]*types.Transaction, 0, len(hashes))
	uncles := make([][]*types.Header, 0, len(hashes))

	for _, hash := range hashes {
		if block, ok := blocks[hash]; ok {
			transactions = append(transactions, block.Transactions())
			uncles = append(uncles, block.Uncles())
		}
	}
	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)

	return nil
}

// RequestReceipts simulates a getReceipts request associated with a particular
// peer in the download tester. It can be used to retrieve batches of block
// receipts from the requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	receipts := dlp.dl.peerReceipts[dlp.id]

	results := make([][]*types.Receipt, 0, len(hashes))
	for _, hash := range hashes {
		if receipt, ok := receipts[hash]; ok {
			results = append(results, receipt)
		}
	}
	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)

	return nil
}

// RequestNodeData simulates a getNodeData request associated with a particular
// peer in the download tester. It can be used to retrieve batches of node state
// data from the requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.dl.peerMissingStates[dlp.id][hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)

	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
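// The common argument is the length of the shared chain prefix (including the
// genesis block), and each entry in lengths is the total length of one fork,
// so fork i contributes lengths[i]-common additional headers and blocks.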
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks := lengths[0], lengths[0]

	minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks
	if minReceipts < 0 {
		minReceipts = 1
	}
	if maxReceipts < 0 {
		maxReceipts = 1
	}
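	// Fast sync only fetches receipts up to the pivot block, and the pivot
	// floats within fsPivotInterval of the head, hence a [min, max] window
	// rather than an exact receipt count.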
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common

		minReceipts += length - common - fsMinFullBlocks - fsPivotInterval
		maxReceipts += length - common - fsMinFullBlocks
	}
	switch tester.downloader.mode {
	case FullSync:
		minReceipts, maxReceipts = 1, 1
	case LightSync:
		blocks, minReceipts, maxReceipts = 1, 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts)
	}
	// Verify the state trie too for fast syncs
	if tester.downloader.mode == FastSync {
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(tester.stateDb)); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}
}

// Tests that simple synchronisation against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
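	// The hook above blocks the import goroutine until the test signals
	// proceed, freezing len(results) fetched blocks in flight so throttling
	// can be observed deterministically.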
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
						cached = receipts
					}
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronisation failed: %v", err)
	}
}

// Tests that simple synchronisation against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Create peers of every type
	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block by itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	targetBlocks := 2*blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for hash, receipt := range receipts {
		if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["attack"], hashes[missing])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding shifted headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}
  1192  
  1193  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1194  // for various failure scenarios. Afterwards a full sync is attempted to make
  1195  // sure no state was corrupted.
  1196  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1197  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1198  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1199  
  1200  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1201  	tester := newTester()
  1202  	defer tester.terminate()
  1203  
  1204  	// Create a small enough block chain to download
  1205  	targetBlocks := 3*fsHeaderSafetyNet + fsPivotInterval + fsMinFullBlocks
  1206  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1207  
  1208  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1209  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1210  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
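	// The gap lands at block fsHeaderSafetyNet+MaxHeaderFetch, so once the fsHeaderSafetyNet
	// most recent headers are rolled back, the head must sit at or below MaxHeaderFetch.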
  1211  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1212  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1213  
  1214  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1215  		t.Fatalf("succeeded fast attacker synchronisation")
  1216  	}
  1217  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1218  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1219  	}
  1220  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1221  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1222  	// rolled back, and also the pivot point being reverted to a non-block status.
  1223  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1224  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1225  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in the gap
  1226  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1227  
  1228  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1229  		t.Fatalf("succeeded block attacker synchronisation")
  1230  	}
  1231  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1232  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1233  	}
  1234  	if mode == FastSync {
  1235  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1236  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1237  		}
  1238  	}
  1239  	// Attempt to sync with an attacker that withholds promised blocks after the
  1240  	// fast sync pivot point. This could be a trial to leave the node with a bad
  1241  	// but already imported pivot block.
  1242  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1243  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1244  
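	// Reset the pivot failure counter; the init hook below makes every header from the pivot
	// region up to the head vanish only once syncing has begun, then unregisters itself so
	// later syncs see the peer unchanged.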
  1245  	tester.downloader.fsPivotFails = 0
  1246  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1247  		for i := missing; i <= len(hashes); i++ {
  1248  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1249  		}
  1250  		tester.downloader.syncInitHook = nil
  1251  	}
  1252  
  1253  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1254  		t.Fatalf("succeeded withholding attacker synchronisation")
  1255  	}
  1256  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1257  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1258  	}
  1259  	if mode == FastSync {
  1260  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1261  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1262  		}
  1263  	}
  1264  	tester.downloader.fsPivotFails = fsCriticalTrials
  1265  
  1266  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1267  	// rollback should also disable fast syncing for this process, verify that we
  1268  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1269  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1270  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1271  	if err := tester.sync("valid", nil, mode); err != nil {
  1272  		t.Fatalf("failed to synchronise blocks: %v", err)
  1273  	}
  1274  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1275  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1276  	}
  1277  	if mode != LightSync {
  1278  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1279  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1280  		}
  1281  	}
  1282  }
  1283  
  1284  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1285  // afterwards by not sending any useful hashes.
  1286  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1287  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1288  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1289  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1290  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1291  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1292  
  1293  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1294  	t.Parallel()
  1295  
  1296  	tester := newTester()
  1297  	defer tester.terminate()
  1298  
  1299  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1300  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
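	// The attacker advertises an enormous TD but owns nothing beyond the genesis hash,
	// so the downloader can never make progress and must flag the peer as stalling.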
  1301  
  1302  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1303  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1304  	}
  1305  }
  1306  
  1307  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1308  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1309  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1310  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1311  
  1312  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1313  	// Define the disconnection requirement for individual hash fetch errors
  1314  	tests := []struct {
  1315  		result error
  1316  		drop   bool
  1317  	}{
  1318  		{nil, false},                        // Sync succeeded, all is well
  1319  		{errBusy, false},                    // Sync is already in progress, no problem
  1320  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1321  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1322  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1323  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1324  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1325  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1326  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1327  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1328  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1329  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1330  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1331  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1332  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1333  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1334  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1335  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1336  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1337  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1338  	}
  1339  	// Run the tests and check disconnection status
  1340  	tester := newTester()
  1341  	defer tester.terminate()
  1342  
  1343  	for i, tt := range tests {
  1344  		// Register a new peer and ensure its presence
  1345  		id := fmt.Sprintf("test %d", i)
  1346  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1347  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1348  		}
  1349  		if _, ok := tester.peerHashes[id]; !ok {
  1350  			t.Fatalf("test %d: registered peer not found", i)
  1351  		}
  1352  		// Simulate a synchronisation and check the required result
  1353  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
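		// The mock short-circuits the real sync and simply returns the canned error,
		// so only the drop decision taken by Synchronise is exercised.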
  1354  
  1355  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1356  		if _, ok := tester.peerHashes[id]; ok == tt.drop {
  1357  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1358  		}
  1359  	}
  1360  }
  1361  
  1362  // Tests that synchronisation progress (origin block number, current block number
  1363  // and highest block number) is tracked and updated correctly.
  1364  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1365  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1366  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1367  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1368  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1369  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1370  
  1371  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1372  	t.Parallel()
  1373  
  1374  	tester := newTester()
  1375  	defer tester.terminate()
  1376  
  1377  	// Create a small enough block chain to download
  1378  	targetBlocks := blockCacheLimit - 15
  1379  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1380  
  1381  	// Set a sync init hook to catch progress changes
  1382  	starting := make(chan struct{})
  1383  	progress := make(chan struct{})
  1384  
  1385  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1386  		starting <- struct{}{}
  1387  		<-progress
  1388  	}
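	// The hook parks the sync right after it announces its boundaries, letting the test
	// sample the in-flight progress before releasing it via the progress channel.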
  1389  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1390  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1391  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1392  	}
  1393  	// Synchronise half the blocks and check initial progress
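	// Hashes are ordered head-first with the genesis last, so handing over only
	// hashes[targetBlocks/2:] yields a peer whose chain is roughly half as high.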
  1394  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1395  	pending := new(sync.WaitGroup)
  1396  	pending.Add(1)
  1397  
  1398  	go func() {
  1399  		defer pending.Done()
  1400  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1401  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1402  		}
  1403  	}()
  1404  	<-starting
  1405  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1406  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1407  	}
  1408  	progress <- struct{}{}
  1409  	pending.Wait()
  1410  
  1411  	// Synchronise all the blocks and check continuation progress
  1412  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1413  	pending.Add(1)
  1414  
  1415  	go func() {
  1416  		defer pending.Done()
  1417  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1418  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1419  		}
  1420  	}()
  1421  	<-starting
  1422  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1423  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1424  	}
  1425  	progress <- struct{}{}
  1426  	pending.Wait()
  1427  
  1428  	// Check final progress after successful sync
  1429  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1430  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1431  	}
  1432  }
  1433  
  1434  // Tests that synchronisation progress (origin block number and highest block
  1435  // number) is tracked and updated correctly in case of a fork (or manual head
  1436  // reversal).
  1437  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1438  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1439  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1440  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1441  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1442  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1443  
  1444  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1445  	t.Parallel()
  1446  
  1447  	tester := newTester()
  1448  	defer tester.terminate()
  1449  
  1450  	// Create a forked chain to simulate origin reversal
  1451  	commonBlocks, forkBlocks := MaxHashFetch, 2*MaxHashFetch
  1452  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(commonBlocks+forkBlocks, forkBlocks, tester.genesis, nil, true)
  1453  
  1454  	// Set a sync init hook to catch progress changes
  1455  	starting := make(chan struct{})
  1456  	progress := make(chan struct{})
  1457  
  1458  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1459  		starting <- struct{}{}
  1460  		<-progress
  1461  	}
  1462  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1463  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1464  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1465  	}
  1466  	// Synchronise with one of the forks and check progress
  1467  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1468  	pending := new(sync.WaitGroup)
  1469  	pending.Add(1)
  1470  
  1471  	go func() {
  1472  		defer pending.Done()
  1473  		if err := tester.sync("fork A", nil, mode); err != nil {
  1474  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1475  		}
  1476  	}()
  1477  	<-starting
  1478  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1479  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1480  	}
  1481  	progress <- struct{}{}
  1482  	pending.Wait()
  1483  
  1484  	// Simulate a successful sync above the fork
  1485  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
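	// Pinning the origin at fork A's head means the next cycle, against fork B, has to
	// rewind the progress origin back to the common ancestor.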
  1486  
  1487  	// Synchronise with the second fork and check progress resets
  1488  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1489  	pending.Add(1)
  1490  
  1491  	go func() {
  1492  		defer pending.Done()
  1493  		if err := tester.sync("fork B", nil, mode); err != nil {
  1494  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1495  		}
  1496  	}()
  1497  	<-starting
  1498  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(commonBlocks) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1499  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, commonBlocks, len(hashesA)-1, len(hashesB)-1)
  1500  	}
  1501  	progress <- struct{}{}
  1502  	pending.Wait()
  1503  
  1504  	// Check final progress after successful sync
  1505  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(commonBlocks) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1506  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, commonBlocks, len(hashesB)-1, len(hashesB)-1)
  1507  	}
  1508  }
  1509  
  1510  // Tests that if synchronisation is aborted due to some failure, then the progress
  1511  // origin is not updated in the next sync cycle, as it should be considered the
  1512  // continuation of the previous sync and not a new instance.
  1513  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1514  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1515  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1516  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1517  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1518  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1519  
  1520  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1521  	t.Parallel()
  1522  
  1523  	tester := newTester()
  1524  	defer tester.terminate()
  1525  
  1526  	// Create a small enough block chain to download
  1527  	targetBlocks := blockCacheLimit - 15
  1528  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1529  
  1530  	// Set a sync init hook to catch progress changes
  1531  	starting := make(chan struct{})
  1532  	progress := make(chan struct{})
  1533  
  1534  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1535  		starting <- struct{}{}
  1536  		<-progress
  1537  	}
  1538  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1539  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1540  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1541  	}
  1542  	// Attempt a full sync with a faulty peer
  1543  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1544  	missing := targetBlocks / 2
  1545  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1546  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1547  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1548  
  1549  	pending := new(sync.WaitGroup)
  1550  	pending.Add(1)
  1551  
  1552  	go func() {
  1553  		defer pending.Done()
  1554  		if err := tester.sync("faulty", nil, mode); err == nil {
  1555  			panic("succeeded faulty synchronisation")
  1556  		}
  1557  	}()
  1558  	<-starting
  1559  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1560  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1561  	}
  1562  	progress <- struct{}{}
  1563  	pending.Wait()
  1564  
  1565  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1566  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1567  	pending.Add(1)
  1568  
  1569  	go func() {
  1570  		defer pending.Done()
  1571  		if err := tester.sync("valid", nil, mode); err != nil {
  1572  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1573  		}
  1574  	}()
  1575  	<-starting
  1576  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1577  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1578  	}
  1579  	progress <- struct{}{}
  1580  	pending.Wait()
  1581  
  1582  	// Check final progress after successful sync
  1583  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1584  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1585  	}
  1586  }
  1587  
  1588  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1589  // the progress height is successfully reduced at the next sync invocation.
  1590  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1591  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1592  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1593  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1594  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1595  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1596  
  1597  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1598  	t.Parallel()
  1599  
  1600  	tester := newTester()
  1601  	defer tester.terminate()
  1602  
  1603  	// Create a small block chain
  1604  	targetBlocks := blockCacheLimit - 15
  1605  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1606  
  1607  	// Set a sync init hook to catch progress changes
  1608  	starting := make(chan struct{})
  1609  	progress := make(chan struct{})
  1610  
  1611  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1612  		starting <- struct{}{}
  1613  		<-progress
  1614  	}
  1615  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1616  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1617  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1618  	}
  1619  	// Create and sync with an attacker that promises a higher chain than available
  1620  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
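	// Keep the advertised head but withhold the two blocks right beneath it: the inflated
	// height gets recorded before the sync inevitably fails.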
  1621  	for i := 1; i < 3; i++ {
  1622  		delete(tester.peerHeaders["attack"], hashes[i])
  1623  		delete(tester.peerBlocks["attack"], hashes[i])
  1624  		delete(tester.peerReceipts["attack"], hashes[i])
  1625  	}
  1626  
  1627  	pending := new(sync.WaitGroup)
  1628  	pending.Add(1)
  1629  
  1630  	go func() {
  1631  		defer pending.Done()
  1632  		if err := tester.sync("attack", nil, mode); err == nil {
  1633  			panic("succeeded attacker synchronisation")
  1634  		}
  1635  	}()
  1636  	<-starting
  1637  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1638  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1639  	}
  1640  	progress <- struct{}{}
  1641  	pending.Wait()
  1642  
  1643  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1644  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1645  	pending.Add(1)
  1646  
  1647  	go func() {
  1648  		defer pending.Done()
  1649  		if err := tester.sync("valid", nil, mode); err != nil {
  1650  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1651  		}
  1652  	}()
  1653  	<-starting
  1654  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1655  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1656  	}
  1657  	progress <- struct{}{}
  1658  	pending.Wait()
  1659  
  1660  	// Check final progress after successful sync
  1661  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1662  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1663  	}
  1664  }
  1665  
  1666  // This test reproduces an issue where unexpected deliveries would
  1667  // block indefinitely if they arrived at the right time.
  1668  func TestDeliverHeadersHang62(t *testing.T)      { testDeliverHeadersHang(t, 62, FullSync) }
  1669  func TestDeliverHeadersHang63Full(t *testing.T)  { testDeliverHeadersHang(t, 63, FullSync) }
  1670  func TestDeliverHeadersHang63Fast(t *testing.T)  { testDeliverHeadersHang(t, 63, FastSync) }
  1671  func TestDeliverHeadersHang64Full(t *testing.T)  { testDeliverHeadersHang(t, 64, FullSync) }
  1672  func TestDeliverHeadersHang64Fast(t *testing.T)  { testDeliverHeadersHang(t, 64, FastSync) }
  1673  func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
  1674  
  1675  type floodingTestPeer struct {
  1676  	peer   Peer
  1677  	tester *downloadTester
  1678  }
  1679  
  1680  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1681  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1682  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1683  }
  1684  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1685  	return ftp.peer.RequestBodies(hashes)
  1686  }
  1687  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1688  	return ftp.peer.RequestReceipts(hashes)
  1689  }
  1690  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1691  	return ftp.peer.RequestNodeData(hashes)
  1692  }
  1693  
  1694  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1695  	deliveriesDone := make(chan struct{}, 500)
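	// Fire a burst of concurrent bogus deliveries from unknown peers; each must return
	// promptly rather than wedging on the downloader's internal channels.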
  1696  	for i := 0; i < cap(deliveriesDone); i++ {
  1697  		peer := fmt.Sprintf("fake-peer%d", i)
  1698  		go func() {
  1699  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1700  			deliveriesDone <- struct{}{}
  1701  		}()
  1702  	}
  1703  	// Deliver the actual requested headers.
  1704  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1705  	// None of the extra deliveries should block.
  1706  	timeout := time.After(15 * time.Second)
  1707  	for i := 0; i < cap(deliveriesDone); i++ {
  1708  		select {
  1709  		case <-deliveriesDone:
  1710  		case <-timeout:
  1711  			panic("blocked")
  1712  		}
  1713  	}
  1714  	return nil
  1715  }
  1716  
  1717  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1718  	t.Parallel()
  1719  
  1720  	master := newTester()
  1721  	defer master.terminate()
  1722  
  1723  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
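	// The hang is a timing-sensitive race, so run many fresh syncs back to back to give
	// it a chance to surface.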
  1724  	for i := 0; i < 200; i++ {
  1725  		tester := newTester()
  1726  		tester.peerDb = master.peerDb
  1727  
  1728  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1729  		// Whenever the downloader requests headers, flood it with
  1730  		// a lot of unrequested header deliveries.
  1731  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1732  			tester.downloader.peers.peers["peer"].peer,
  1733  			tester,
  1734  		}
  1735  
  1736  		if err := tester.sync("peer", nil, mode); err != nil {
  1737  			t.Errorf("sync failed: %v", err)
  1738  		}
  1739  		tester.terminate()
  1740  	}
  1741  }
  1742  
  1743  // Tests that if fast sync aborts in the critical section, it can restart a few
  1744  // times before giving up.
  1745  func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
  1746  func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
  1747  func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
  1748  func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
  1749  
  1750  func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
  1751  	tester := newTester()
  1752  	defer tester.terminate()
  1753  
  1754  	// Create a large enough blockchain to actually fast sync on
  1755  	targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
  1756  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1757  
  1758  	// Create a tester peer with a critical section header missing (force failures)
  1759  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1760  	delete(tester.peerHeaders["peer"], hashes[fsMinFullBlocks-1])
  1761  	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
  1762  
  1763  	// Remove all possible pivot state roots and slow down replies (test failure resets later)
  1764  	for i := 0; i < fsPivotInterval; i++ {
  1765  		tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
  1766  	}
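	// With every candidate pivot state withheld, the state download is guaranteed to stall
	// inside the critical section.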
  1767  	(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(500 * time.Millisecond) // Enough to reach the critical section
  1768  
  1769  	// Synchronise with the peer a few times and make sure they fail until the retry limit
  1770  	for i := 0; i < int(fsCriticalTrials)-1; i++ {
  1771  		// Attempt a sync and ensure it fails properly
  1772  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1773  			t.Fatalf("failing fast sync succeeded")
  1774  		}
  1775  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1776  
  1777  		// On the first failure the pivot should now be locked => re-enable everything else to detect pivot changes
  1778  		if i == 0 {
  1779  			if tester.downloader.fsPivotLock == nil {
  1780  				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
  1781  				t.Fatalf("pivot block not locked in after critical section failure")
  1782  			}
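			// Restore the withheld header and leave only the locked pivot's state missing, so
			// any further failure can only originate from the pivot itself.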
  1783  			tester.lock.Lock()
  1784  			tester.peerHeaders["peer"][hashes[fsMinFullBlocks-1]] = headers[hashes[fsMinFullBlocks-1]]
  1785  			tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
  1786  			(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(0)
  1787  			tester.lock.Unlock()
  1788  		}
  1789  	}
  1790  	// Return all nodes if we're testing fast sync progression
  1791  	if progress {
  1792  		tester.lock.Lock()
  1793  		tester.peerMissingStates["peer"] = map[common.Hash]bool{}
  1794  		tester.lock.Unlock()
  1795  
  1796  		if err := tester.sync("peer", nil, FastSync); err != nil {
  1797  			t.Fatalf("failed to synchronise blocks in progressed fast sync: %v", err)
  1798  		}
  1799  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1800  
  1801  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != 1 {
  1802  			t.Fatalf("progressed pivot trial count mismatch: have %v, want %v", fails, 1)
  1803  		}
  1804  		assertOwnChain(t, tester, targetBlocks+1)
  1805  	} else {
  1806  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1807  			t.Fatalf("succeeded to synchronise blocks in failed fast sync")
  1808  		}
  1809  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1810  
  1811  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != fsCriticalTrials {
  1812  			t.Fatalf("failed pivot trial count mismatch: have %v, want %v", fails, fsCriticalTrials)
  1813  		}
  1814  	}
  1815  	// Retry limit exhausted, downloader will switch to full sync, should succeed
  1816  	if err := tester.sync("peer", nil, FastSync); err != nil {
  1817  		t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
  1818  	}
  1819  	// Note, we can't assert the chain here because the test asserter assumes sync
  1820  	// completed using a single mode of operation, whereas fast-then-slow can result
  1821  	// in arbitrary intermediate state that's not cleanly verifiable.
  1822  }