github.com/edxfund/validator@v1.8.16-0.20181020093046-c1def72855da/eth/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/EDXFund/Validator/common"
    29  	"github.com/EDXFund/Validator/consensus/ethash"
    30  	"github.com/EDXFund/Validator/core"
    31  	"github.com/EDXFund/Validator/core/types"
    32  	"github.com/EDXFund/Validator/crypto"
    33  	"github.com/EDXFund/Validator/ethdb"
    34  	"github.com/EDXFund/Validator/event"
    35  	"github.com/EDXFund/Validator/params"
    36  	"github.com/EDXFund/Validator/trie"
    37  )
    38  
    39  var (
    40  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    41  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    42  )
    43  
    44  // Reduce some of the parameters to make the tester faster.
    45  func init() {
    46  	MaxForkAncestry = uint64(10000)
    47  	blockCacheItems = 1024
    48  	fsHeaderContCheck = 500 * time.Millisecond
    49  }
    50  
     51  // downloadTester is a test simulator for mocking out the local blockchain.
    52  type downloadTester struct {
    53  	downloader *Downloader
    54  
     55  	genesis *types.Block   // Genesis block used by the tester and peers
    56  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    57  	peerDb  ethdb.Database // Database of the peers containing all data
    58  
    59  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    60  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    61  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    62  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    63  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    64  
    65  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    66  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    67  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    68  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    69  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    70  
    71  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    72  
    73  	lock sync.RWMutex
    74  }
    75  
    76  // newTester creates a new downloader test mocker.
    77  func newTester() *downloadTester {
    78  	testdb := ethdb.NewMemDatabase()
    79  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    80  
    81  	tester := &downloadTester{
    82  		genesis:           genesis,
    83  		peerDb:            testdb,
    84  		ownHashes:         []common.Hash{genesis.Hash()},
    85  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    86  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    87  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    88  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    89  		peerHashes:        make(map[string][]common.Hash),
    90  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    91  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    92  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    93  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    94  		peerMissingStates: make(map[string]map[common.Hash]bool),
    95  	}
    96  	tester.stateDb = ethdb.NewMemDatabase()
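         	// Mark the genesis state root as present in the sync database; CurrentBlock and
         	// InsertChain treat a root found in stateDb as proof that the block's state exists.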
    97  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    98  
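         	// The downloader is created in FullSync mode; each test picks its own mode when
         	// calling sync, which forwards it to downloader.synchronise.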
    99  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   100  
   101  	return tester
   102  }
   103  
   104  // makeChain creates a chain of n blocks starting at and including parent.
    105  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   106  // contains a transaction and every 5th an uncle to allow testing correct block
   107  // reassembly.
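         // For example (illustrative only), dl.makeChain(10, 0, dl.genesis, nil, false)
         // returns 11 hashes, with hashes[0] being the new head and hashes[10] equal to
         // dl.genesis.Hash().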
   108  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   109  	// Generate the block chain
   110  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
   111  		block.SetCoinbase(common.Address{seed})
   112  
   113  		// If a heavy chain is requested, delay blocks to raise difficulty
   114  		if heavy {
   115  			block.OffsetTime(-1)
   116  		}
    117  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   118  		if parent == dl.genesis && i%3 == 0 {
   119  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   120  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   121  			if err != nil {
   122  				panic(err)
   123  			}
   124  			block.AddTx(tx)
   125  		}
   126  		// If the block number is a multiple of 5, add a bonus uncle to the block
   127  		if i > 0 && i%5 == 0 {
   128  			block.AddUncle(&types.Header{
   129  				ParentHash: block.PrevBlock(i - 1).Hash(),
   130  				Number:     big.NewInt(block.Number().Int64() - 1),
   131  			})
   132  		}
   133  	})
   134  	// Convert the block-chain into a hash-chain and header/block maps
   135  	hashes := make([]common.Hash, n+1)
   136  	hashes[len(hashes)-1] = parent.Hash()
   137  
   138  	headerm := make(map[common.Hash]*types.Header, n+1)
   139  	headerm[parent.Hash()] = parent.Header()
   140  
   141  	blockm := make(map[common.Hash]*types.Block, n+1)
   142  	blockm[parent.Hash()] = parent
   143  
   144  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   145  	receiptm[parent.Hash()] = parentReceipts
   146  
   147  	for i, b := range blocks {
   148  		hashes[len(hashes)-i-2] = b.Hash()
   149  		headerm[b.Hash()] = b.Header()
   150  		blockm[b.Hash()] = b
   151  		receiptm[b.Hash()] = receipts[i]
   152  	}
   153  	return hashes, headerm, blockm, receiptm
   154  }
   155  
   156  // makeChainFork creates two chains of length n, such that h1[:f] and
   157  // h2[:f] are different but have a common suffix of length n-f.
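         // For example, makeChainFork(8, 3, parent, nil, true) yields two hash chains of
         // 9 entries each (8 blocks plus the parent) whose first 3 entries differ and
         // whose remaining 6 entries are shared.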
   158  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   159  	// Create the common suffix
   160  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   161  
    162  	// Create the forks, making the second heavier if non-balanced forks were requested
   163  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   164  	hashes1 = append(hashes1, hashes[1:]...)
   165  
   166  	heavy := false
   167  	if !balanced {
   168  		heavy = true
   169  	}
   170  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   171  	hashes2 = append(hashes2, hashes[1:]...)
   172  
   173  	for hash, header := range headers {
   174  		headers1[hash] = header
   175  		headers2[hash] = header
   176  	}
   177  	for hash, block := range blocks {
   178  		blocks1[hash] = block
   179  		blocks2[hash] = block
   180  	}
   181  	for hash, receipt := range receipts {
   182  		receipts1[hash] = receipt
   183  		receipts2[hash] = receipt
   184  	}
   185  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   186  }
   187  
   188  // terminate aborts any operations on the embedded downloader and releases all
   189  // held resources.
   190  func (dl *downloadTester) terminate() {
   191  	dl.downloader.Terminate()
   192  }
   193  
   194  // sync starts synchronizing with a remote peer, blocking until it completes.
   195  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   196  	dl.lock.RLock()
   197  	hash := dl.peerHashes[id][0]
   198  	// If no particular TD was requested, load from the peer's blockchain
   199  	if td == nil {
   200  		td = big.NewInt(1)
   201  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   202  			td = diff
   203  		}
   204  	}
   205  	dl.lock.RUnlock()
   206  
   207  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   208  	err := dl.downloader.synchronise(id, hash, td, mode)
   209  	select {
   210  	case <-dl.downloader.cancelCh:
   211  		// Ok, downloader fully cancelled after sync cycle
   212  	default:
    213  		// Downloader is still accepting packets, which can block up a peer
   214  		panic("downloader active post sync cycle") // panic will be caught by tester
   215  	}
   216  	return err
   217  }
   218  
    219  // HasHeader checks if a header is present in the tester's canonical chain.
   220  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   221  	return dl.GetHeaderByHash(hash) != nil
   222  }
   223  
    224  // HasBlock checks if a block is present in the tester's canonical chain.
   225  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   226  	return dl.GetBlockByHash(hash) != nil
   227  }
   228  
    229  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   230  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   231  	dl.lock.RLock()
   232  	defer dl.lock.RUnlock()
   233  
   234  	return dl.ownHeaders[hash]
   235  }
   236  
    237  // GetBlockByHash retrieves a block from the tester's canonical chain.
   238  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   239  	dl.lock.RLock()
   240  	defer dl.lock.RUnlock()
   241  	return dl.ownBlocks[hash]
   242  }
   243  
   244  // CurrentHeader retrieves the current head header from the canonical chain.
   245  func (dl *downloadTester) CurrentHeader() *types.Header {
   246  	dl.lock.RLock()
   247  	defer dl.lock.RUnlock()
   248  
   249  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   250  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   251  			return header
   252  		}
   253  	}
   254  	return dl.genesis.Header()
   255  }
   256  
   257  // CurrentBlock retrieves the current head block from the canonical chain.
   258  func (dl *downloadTester) CurrentBlock() *types.Block {
   259  	dl.lock.RLock()
   260  	defer dl.lock.RUnlock()
   261  
   262  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   263  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   264  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   265  				return block
   266  			}
   267  		}
   268  	}
   269  	return dl.genesis
   270  }
   271  
   272  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   273  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   274  	dl.lock.RLock()
   275  	defer dl.lock.RUnlock()
   276  
   277  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   278  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   279  			return block
   280  		}
   281  	}
   282  	return dl.genesis
   283  }
   284  
   285  // FastSyncCommitHead manually sets the head block to a given hash.
   286  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   287  	// For now only check that the state trie is correct
   288  	if block := dl.GetBlockByHash(hash); block != nil {
   289  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
   290  		return err
   291  	}
   292  	return fmt.Errorf("non existent block: %x", hash[:4])
   293  }
   294  
   295  // GetTd retrieves the block's total difficulty from the canonical chain.
   296  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   297  	dl.lock.RLock()
   298  	defer dl.lock.RUnlock()
   299  
   300  	return dl.ownChainTd[hash]
   301  }
   302  
   303  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   304  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   305  	dl.lock.Lock()
   306  	defer dl.lock.Unlock()
   307  
    308  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   309  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   310  		return 0, errors.New("unknown parent")
   311  	}
   312  	for i := 1; i < len(headers); i++ {
   313  		if headers[i].ParentHash != headers[i-1].Hash() {
   314  			return i, errors.New("unknown parent")
   315  		}
   316  	}
   317  	// Do a full insert if pre-checks passed
   318  	for i, header := range headers {
   319  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   320  			continue
   321  		}
   322  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   323  			return i, errors.New("unknown parent")
   324  		}
   325  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   326  		dl.ownHeaders[header.Hash()] = header
   327  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   328  	}
   329  	return len(headers), nil
   330  }
   331  
   332  // InsertChain injects a new batch of blocks into the simulated chain.
   333  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   334  	dl.lock.Lock()
   335  	defer dl.lock.Unlock()
   336  
   337  	for i, block := range blocks {
   338  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   339  			return i, errors.New("unknown parent")
   340  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   341  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   342  		}
   343  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   344  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   345  			dl.ownHeaders[block.Hash()] = block.Header()
   346  		}
   347  		dl.ownBlocks[block.Hash()] = block
   348  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   349  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   350  	}
   351  	return len(blocks), nil
   352  }
   353  
   354  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   355  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   356  	dl.lock.Lock()
   357  	defer dl.lock.Unlock()
   358  
   359  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   360  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   361  			return i, errors.New("unknown owner")
   362  		}
   363  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   364  			return i, errors.New("unknown parent")
   365  		}
   366  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   367  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   368  	}
   369  	return len(blocks), nil
   370  }
   371  
   372  // Rollback removes some recently added elements from the chain.
   373  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   374  	dl.lock.Lock()
   375  	defer dl.lock.Unlock()
   376  
   377  	for i := len(hashes) - 1; i >= 0; i-- {
   378  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   379  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   380  		}
   381  		delete(dl.ownChainTd, hashes[i])
   382  		delete(dl.ownHeaders, hashes[i])
   383  		delete(dl.ownReceipts, hashes[i])
   384  		delete(dl.ownBlocks, hashes[i])
   385  	}
   386  }
   387  
   388  // newPeer registers a new block download source into the downloader.
   389  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   390  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   391  }
   392  
   393  // newSlowPeer registers a new block download source into the downloader, with a
   394  // specific delay time on processing the network packets sent to it, simulating
   395  // potentially slow network IO.
   396  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   397  	dl.lock.Lock()
   398  	defer dl.lock.Unlock()
   399  
   400  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   401  	if err == nil {
   402  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   403  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   404  		copy(dl.peerHashes[id], hashes)
   405  
   406  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   407  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   408  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   409  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   410  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   411  
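         		// The hash chain is ordered head -> genesis, so the last entry is the genesis hash.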
   412  		genesis := hashes[len(hashes)-1]
   413  		if header := headers[genesis]; header != nil {
   414  			dl.peerHeaders[id][genesis] = header
   415  			dl.peerChainTds[id][genesis] = header.Difficulty
   416  		}
   417  		if block := blocks[genesis]; block != nil {
   418  			dl.peerBlocks[id][genesis] = block
   419  			dl.peerChainTds[id][genesis] = block.Difficulty()
   420  		}
   421  
   422  		for i := len(hashes) - 2; i >= 0; i-- {
   423  			hash := hashes[i]
   424  
   425  			if header, ok := headers[hash]; ok {
   426  				dl.peerHeaders[id][hash] = header
   427  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   428  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   429  				}
   430  			}
   431  			if block, ok := blocks[hash]; ok {
   432  				dl.peerBlocks[id][hash] = block
   433  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   434  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   435  				}
   436  			}
   437  			if receipt, ok := receipts[hash]; ok {
   438  				dl.peerReceipts[id][hash] = receipt
   439  			}
   440  		}
   441  	}
   442  	return err
   443  }
   444  
   445  // dropPeer simulates a hard peer removal from the connection pool.
   446  func (dl *downloadTester) dropPeer(id string) {
   447  	dl.lock.Lock()
   448  	defer dl.lock.Unlock()
   449  
   450  	delete(dl.peerHashes, id)
   451  	delete(dl.peerHeaders, id)
   452  	delete(dl.peerBlocks, id)
   453  	delete(dl.peerChainTds, id)
   454  
   455  	dl.downloader.UnregisterPeer(id)
   456  }
   457  
   458  type downloadTesterPeer struct {
   459  	dl    *downloadTester
   460  	id    string
   461  	delay time.Duration
   462  	lock  sync.RWMutex
   463  }
   464  
    465  // setDelay is a thread-safe setter for the network delay value.
   466  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   467  	dlp.lock.Lock()
   468  	defer dlp.lock.Unlock()
   469  
   470  	dlp.delay = delay
   471  }
   472  
    473  // waitDelay is a thread-safe way to sleep for the configured time.
   474  func (dlp *downloadTesterPeer) waitDelay() {
   475  	dlp.lock.RLock()
   476  	delay := dlp.delay
   477  	dlp.lock.RUnlock()
   478  
   479  	time.Sleep(delay)
   480  }
   481  
    482  // Head retrieves the peer's current head hash and total difficulty (the TD is
    483  // returned as nil here; tests pass an explicit TD to synchronise instead).
   484  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   485  	dlp.dl.lock.RLock()
   486  	defer dlp.dl.lock.RUnlock()
   487  
   488  	return dlp.dl.peerHashes[dlp.id][0], nil
   489  }
   490  
    491  // RequestHeadersByHash retrieves a batch of headers from a particular peer in
    492  // the download tester based on a hash origin, resolving the hash to a block
    493  // number and deferring to the absolute header fetcher.
   494  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   495  	// Find the canonical number of the hash
   496  	dlp.dl.lock.RLock()
   497  	number := uint64(0)
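         	// The peer's hash chain is ordered head -> genesis, so translate the slice index
         	// of the origin hash into its block number.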
   498  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   499  		if hash == origin {
   500  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   501  			break
   502  		}
   503  	}
   504  	dlp.dl.lock.RUnlock()
   505  
   506  	// Use the absolute header fetcher to satisfy the query
   507  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   508  }
   509  
    510  // RequestHeadersByNumber retrieves a batch of headers from a particular peer in
    511  // the download tester based on a numbered origin, delivering the results to the
    512  // downloader asynchronously.
   513  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   514  	dlp.waitDelay()
   515  
   516  	dlp.dl.lock.RLock()
   517  	defer dlp.dl.lock.RUnlock()
   518  
   519  	// Gather the next batch of headers
   520  	hashes := dlp.dl.peerHashes[dlp.id]
   521  	headers := dlp.dl.peerHeaders[dlp.id]
   522  	result := make([]*types.Header, 0, amount)
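         	// Walk from the origin towards the head (ascending block numbers), honouring the
         	// requested skip distance; the reverse flag is ignored by this mock.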
   523  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   524  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   525  			result = append(result, header)
   526  		}
   527  	}
   528  	// Delay delivery a bit to allow attacks to unfold
   529  	go func() {
   530  		time.Sleep(time.Millisecond)
   531  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   532  	}()
   533  	return nil
   534  }
   535  
    536  // RequestBodies retrieves a batch of block bodies by hash from a particular
    537  // peer in the download tester, delivering the results to the downloader
    538  // asynchronously.
   539  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   540  	dlp.waitDelay()
   541  
   542  	dlp.dl.lock.RLock()
   543  	defer dlp.dl.lock.RUnlock()
   544  
   545  	blocks := dlp.dl.peerBlocks[dlp.id]
   546  
   547  	transactions := make([][]*types.Transaction, 0, len(hashes))
   548  	uncles := make([][]*types.Header, 0, len(hashes))
   549  
   550  	for _, hash := range hashes {
   551  		if block, ok := blocks[hash]; ok {
   552  			transactions = append(transactions, block.Transactions())
   553  			uncles = append(uncles, block.Uncles())
   554  		}
   555  	}
   556  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   557  
   558  	return nil
   559  }
   560  
    561  // RequestReceipts retrieves a batch of block receipts by hash from a particular
    562  // peer in the download tester, delivering the results to the downloader
    563  // asynchronously.
   564  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   565  	dlp.waitDelay()
   566  
   567  	dlp.dl.lock.RLock()
   568  	defer dlp.dl.lock.RUnlock()
   569  
   570  	receipts := dlp.dl.peerReceipts[dlp.id]
   571  
   572  	results := make([][]*types.Receipt, 0, len(hashes))
   573  	for _, hash := range hashes {
   574  		if receipt, ok := receipts[hash]; ok {
   575  			results = append(results, receipt)
   576  		}
   577  	}
   578  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   579  
   580  	return nil
   581  }
   582  
    583  // RequestNodeData retrieves a batch of state trie nodes by hash from a
    584  // particular peer in the download tester, skipping entries marked as missing,
    585  // and delivering the results to the downloader asynchronously.
   586  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   587  	dlp.waitDelay()
   588  
   589  	dlp.dl.lock.RLock()
   590  	defer dlp.dl.lock.RUnlock()
   591  
   592  	results := make([][]byte, 0, len(hashes))
   593  	for _, hash := range hashes {
   594  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   595  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   596  				results = append(results, data)
   597  			}
   598  		}
   599  	}
   600  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   601  
   602  	return nil
   603  }
   604  
   605  // assertOwnChain checks if the local chain contains the correct number of items
   606  // of the various chain components.
   607  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   608  	assertOwnForkedChain(t, tester, 1, []int{length})
   609  }
   610  
   611  // assertOwnForkedChain checks if the local forked chain contains the correct
   612  // number of items of the various chain components.
   613  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   614  	// Initialize the counters for the first fork
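         	// Fast sync only retrieves receipts up to the pivot; the last fsMinFullBlocks
         	// blocks are expected to be imported fully, hence the subtraction below.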
   615  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
   616  
   617  	if receipts < 0 {
   618  		receipts = 1
   619  	}
   620  	// Update the counters for each subsequent fork
   621  	for _, length := range lengths[1:] {
   622  		headers += length - common
   623  		blocks += length - common
   624  		receipts += length - common - fsMinFullBlocks
   625  	}
   626  	switch tester.downloader.mode {
   627  	case FullSync:
   628  		receipts = 1
   629  	case LightSync:
   630  		blocks, receipts = 1, 1
   631  	}
   632  	if hs := len(tester.ownHeaders); hs != headers {
   633  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   634  	}
   635  	if bs := len(tester.ownBlocks); bs != blocks {
   636  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   637  	}
   638  	if rs := len(tester.ownReceipts); rs != receipts {
   639  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   640  	}
   641  	// Verify the state trie too for fast syncs
   642  	/*if tester.downloader.mode == FastSync {
   643  		pivot := uint64(0)
   644  		var index int
   645  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   646  			index = pivot
   647  		} else {
   648  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   649  		}
   650  		if index > 0 {
   651  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   652  				t.Fatalf("state reconstruction failed: %v", err)
   653  			}
   654  		}
   655  	}*/
   656  }
   657  
   658  // Tests that simple synchronization against a canonical chain works correctly.
   659  // In this test common ancestor lookup should be short circuited and not require
   660  // binary searching.
   661  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   662  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   663  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   664  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   665  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   666  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   667  
   668  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   669  	t.Parallel()
   670  
   671  	tester := newTester()
   672  	defer tester.terminate()
   673  
   674  	// Create a small enough block chain to download
   675  	targetBlocks := blockCacheItems - 15
   676  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   677  
   678  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   679  
   680  	// Synchronise with the peer and make sure all relevant data was retrieved
   681  	if err := tester.sync("peer", nil, mode); err != nil {
   682  		t.Fatalf("failed to synchronise blocks: %v", err)
   683  	}
   684  	assertOwnChain(t, tester, targetBlocks+1)
   685  }
   686  
    687  // Tests that if a large batch of blocks is being downloaded, it is throttled
   688  // until the cached blocks are retrieved.
   689  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   690  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   691  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   692  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   693  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   694  
   695  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   696  	t.Parallel()
   697  	tester := newTester()
   698  	defer tester.terminate()
   699  
    700  	// Create a long blockchain for the tester to download
   701  	targetBlocks := 8 * blockCacheItems
   702  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   703  
   704  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   705  
   706  	// Wrap the importer to allow stepping
   707  	blocked, proceed := uint32(0), make(chan struct{})
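         	// The hook records how many results are pending import and blocks until the test
         	// explicitly signals it to proceed.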
   708  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   709  		atomic.StoreUint32(&blocked, uint32(len(results)))
   710  		<-proceed
   711  	}
   712  	// Start a synchronisation concurrently
   713  	errc := make(chan error)
   714  	go func() {
   715  		errc <- tester.sync("peer", nil, mode)
   716  	}()
   717  	// Iteratively take some blocks, always checking the retrieval count
   718  	for {
    719  		// Check the retrieval count synchronously (hence this explicit lock/unlock block)
   720  		tester.lock.RLock()
   721  		retrieved := len(tester.ownBlocks)
   722  		tester.lock.RUnlock()
   723  		if retrieved >= targetBlocks+1 {
   724  			break
   725  		}
   726  		// Wait a bit for sync to throttle itself
   727  		var cached, frozen int
   728  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   729  			time.Sleep(25 * time.Millisecond)
   730  
   731  			tester.lock.Lock()
   732  			tester.downloader.queue.lock.Lock()
   733  			cached = len(tester.downloader.queue.blockDonePool)
   734  			if mode == FastSync {
   735  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   736  					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   737  					cached = receipts
   738  					//}
   739  				}
   740  			}
   741  			frozen = int(atomic.LoadUint32(&blocked))
   742  			retrieved = len(tester.ownBlocks)
   743  			tester.downloader.queue.lock.Unlock()
   744  			tester.lock.Unlock()
   745  
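         			// Stop waiting early once the cache is full or everything outstanding
         			// (imported + cached + blocked in the import hook) accounts for the whole chain.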
   746  			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
   747  				break
   748  			}
   749  		}
   750  		// Make sure we filled up the cache, then exhaust it
   751  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   752  
   753  		tester.lock.RLock()
   754  		retrieved = len(tester.ownBlocks)
   755  		tester.lock.RUnlock()
   756  		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
   757  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   758  		}
   759  		// Permit the blocked blocks to import
   760  		if atomic.LoadUint32(&blocked) > 0 {
   761  			atomic.StoreUint32(&blocked, uint32(0))
   762  			proceed <- struct{}{}
   763  		}
   764  	}
   765  	// Check that we haven't pulled more blocks than available
   766  	assertOwnChain(t, tester, targetBlocks+1)
   767  	if err := <-errc; err != nil {
   768  		t.Fatalf("block synchronization failed: %v", err)
   769  	}
   770  }
   771  
   772  // Tests that simple synchronization against a forked chain works correctly. In
   773  // this test common ancestor lookup should *not* be short circuited, and a full
   774  // binary search should be executed.
   775  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   776  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   777  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   778  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   779  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   780  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   781  
   782  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   783  	t.Parallel()
   784  
   785  	tester := newTester()
   786  	defer tester.terminate()
   787  
   788  	// Create a long enough forked chain
   789  	common, fork := MaxHashFetch, 2*MaxHashFetch
   790  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   791  
   792  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   793  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   794  
   795  	// Synchronise with the peer and make sure all blocks were retrieved
   796  	if err := tester.sync("fork A", nil, mode); err != nil {
   797  		t.Fatalf("failed to synchronise blocks: %v", err)
   798  	}
   799  	assertOwnChain(t, tester, common+fork+1)
   800  
   801  	// Synchronise with the second peer and make sure that fork is pulled too
   802  	if err := tester.sync("fork B", nil, mode); err != nil {
   803  		t.Fatalf("failed to synchronise blocks: %v", err)
   804  	}
   805  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   806  }
   807  
    808  // Tests that synchronising against a much shorter but much heavier fork works
    809  // correctly and is not dropped.
   810  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   811  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   812  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   813  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   814  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   815  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   816  
   817  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   818  	t.Parallel()
   819  
   820  	tester := newTester()
   821  	defer tester.terminate()
   822  
   823  	// Create a long enough forked chain
   824  	common, fork := MaxHashFetch, 4*MaxHashFetch
   825  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   826  
   827  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   828  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   829  
   830  	// Synchronise with the peer and make sure all blocks were retrieved
   831  	if err := tester.sync("light", nil, mode); err != nil {
   832  		t.Fatalf("failed to synchronise blocks: %v", err)
   833  	}
   834  	assertOwnChain(t, tester, common+fork+1)
   835  
   836  	// Synchronise with the second peer and make sure that fork is pulled too
   837  	if err := tester.sync("heavy", nil, mode); err != nil {
   838  		t.Fatalf("failed to synchronise blocks: %v", err)
   839  	}
   840  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   841  }
   842  
   843  // Tests that chain forks are contained within a certain interval of the current
   844  // chain head, ensuring that malicious peers cannot waste resources by feeding
   845  // long dead chains.
   846  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   847  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   848  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   849  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   850  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   851  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   852  
   853  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   854  	t.Parallel()
   855  
   856  	tester := newTester()
   857  	defer tester.terminate()
   858  
   859  	// Create a long enough forked chain
   860  	common, fork := 13, int(MaxForkAncestry+17)
   861  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   862  
   863  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   864  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   865  
   866  	// Synchronise with the peer and make sure all blocks were retrieved
   867  	if err := tester.sync("original", nil, mode); err != nil {
   868  		t.Fatalf("failed to synchronise blocks: %v", err)
   869  	}
   870  	assertOwnChain(t, tester, common+fork+1)
   871  
    872  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   873  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   874  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   875  	}
   876  }
   877  
   878  // Tests that chain forks are contained within a certain interval of the current
   879  // chain head for short but heavy forks too. These are a bit special because they
   880  // take different ancestor lookup paths.
   881  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   882  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   883  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   884  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   885  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   886  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   887  
   888  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   889  	t.Parallel()
   890  
   891  	tester := newTester()
   892  	defer tester.terminate()
   893  
   894  	// Create a long enough forked chain
   895  	common, fork := 13, int(MaxForkAncestry+17)
   896  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   897  
   898  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   899  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   900  
   901  	// Synchronise with the peer and make sure all blocks were retrieved
   902  	if err := tester.sync("original", nil, mode); err != nil {
   903  		t.Fatalf("failed to synchronise blocks: %v", err)
   904  	}
   905  	assertOwnChain(t, tester, common+fork+1)
   906  
    907  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   908  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   909  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   910  	}
   911  }
   912  
   913  // Tests that an inactive downloader will not accept incoming block headers and
   914  // bodies.
   915  func TestInactiveDownloader62(t *testing.T) {
   916  	t.Parallel()
   917  
   918  	tester := newTester()
   919  	defer tester.terminate()
   920  
   921  	// Check that neither block headers nor bodies are accepted
   922  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   923  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   924  	}
   925  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   926  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   927  	}
   928  }
   929  
   930  // Tests that an inactive downloader will not accept incoming block headers,
   931  // bodies and receipts.
   932  func TestInactiveDownloader63(t *testing.T) {
   933  	t.Parallel()
   934  
   935  	tester := newTester()
   936  	defer tester.terminate()
   937  
    938  	// Check that neither block headers, bodies nor receipts are accepted
   939  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   940  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   941  	}
   942  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   943  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   944  	}
   945  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   946  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   947  	}
   948  }
   949  
   950  // Tests that a canceled download wipes all previously accumulated state.
   951  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   952  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   953  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   954  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   955  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   956  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   957  
   958  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   959  	t.Parallel()
   960  
   961  	tester := newTester()
   962  	defer tester.terminate()
   963  
    964  	// Create a small enough blockchain for the tester to download
   965  	targetBlocks := blockCacheItems - 15
   966  	if targetBlocks >= MaxHashFetch {
   967  		targetBlocks = MaxHashFetch - 15
   968  	}
   969  	if targetBlocks >= MaxHeaderFetch {
   970  		targetBlocks = MaxHeaderFetch - 15
   971  	}
   972  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   973  
   974  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   975  
   976  	// Make sure canceling works with a pristine downloader
   977  	tester.downloader.Cancel()
   978  	if !tester.downloader.queue.Idle() {
   979  		t.Errorf("download queue not idle")
   980  	}
   981  	// Synchronise with the peer, but cancel afterwards
   982  	if err := tester.sync("peer", nil, mode); err != nil {
   983  		t.Fatalf("failed to synchronise blocks: %v", err)
   984  	}
   985  	tester.downloader.Cancel()
   986  	if !tester.downloader.queue.Idle() {
   987  		t.Errorf("download queue not idle")
   988  	}
   989  }
   990  
   991  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   992  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   993  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   994  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   995  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   996  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   997  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   998  
   999  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
  1000  	t.Parallel()
  1001  
  1002  	tester := newTester()
  1003  	defer tester.terminate()
  1004  
  1005  	// Create various peers with various parts of the chain
  1006  	targetPeers := 8
  1007  	targetBlocks := targetPeers*blockCacheItems - 15
  1008  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1009  
  1010  	for i := 0; i < targetPeers; i++ {
  1011  		id := fmt.Sprintf("peer #%d", i)
  1012  		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
  1013  	}
  1014  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1015  		t.Fatalf("failed to synchronise blocks: %v", err)
  1016  	}
  1017  	assertOwnChain(t, tester, targetBlocks+1)
  1018  }
  1019  
  1020  // Tests that synchronisations behave well in multi-version protocol environments
   1021  // and do not wreak havoc on other nodes in the network.
  1022  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1023  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1024  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1025  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1026  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1027  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1028  
  1029  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1030  	t.Parallel()
  1031  
  1032  	tester := newTester()
  1033  	defer tester.terminate()
  1034  
  1035  	// Create a small enough block chain to download
  1036  	targetBlocks := blockCacheItems - 15
  1037  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1038  
  1039  	// Create peers of every type
  1040  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1041  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1042  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1043  
  1044  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1045  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1046  		t.Fatalf("failed to synchronise blocks: %v", err)
  1047  	}
  1048  	assertOwnChain(t, tester, targetBlocks+1)
  1049  
  1050  	// Check that no peers have been dropped off
  1051  	for _, version := range []int{62, 63, 64} {
  1052  		peer := fmt.Sprintf("peer %d", version)
  1053  		if _, ok := tester.peerHashes[peer]; !ok {
  1054  			t.Errorf("%s dropped", peer)
  1055  		}
  1056  	}
  1057  }
  1058  
  1059  // Tests that if a block is empty (e.g. header only), no body request should be
  1060  // made, and instead the header should be assembled into a whole block in itself.
  1061  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1062  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1063  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1064  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1065  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1066  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1067  
  1068  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1069  	t.Parallel()
  1070  
  1071  	tester := newTester()
  1072  	defer tester.terminate()
  1073  
  1074  	// Create a block chain to download
  1075  	targetBlocks := 2*blockCacheItems - 15
  1076  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1077  
  1078  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1079  
  1080  	// Instrument the downloader to signal body requests
  1081  	bodiesHave, receiptsHave := int32(0), int32(0)
  1082  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1083  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1084  	}
  1085  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1086  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1087  	}
  1088  	// Synchronise with the peer and make sure all blocks were retrieved
  1089  	if err := tester.sync("peer", nil, mode); err != nil {
  1090  		t.Fatalf("failed to synchronise blocks: %v", err)
  1091  	}
  1092  	assertOwnChain(t, tester, targetBlocks+1)
  1093  
  1094  	// Validate the number of block bodies that should have been requested
  1095  	bodiesNeeded, receiptsNeeded := 0, 0
  1096  	for _, block := range blocks {
  1097  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1098  			bodiesNeeded++
  1099  		}
  1100  	}
  1101  	for _, receipt := range receipts {
  1102  		if mode == FastSync && len(receipt) > 0 {
  1103  			receiptsNeeded++
  1104  		}
  1105  	}
  1106  	if int(bodiesHave) != bodiesNeeded {
  1107  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1108  	}
  1109  	if int(receiptsHave) != receiptsNeeded {
  1110  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1111  	}
  1112  }
  1113  
  1114  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1115  // stalling the downloader by feeding gapped header chains.
  1116  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1117  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1118  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1119  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1120  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1121  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1122  
  1123  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1124  	t.Parallel()
  1125  
  1126  	tester := newTester()
  1127  	defer tester.terminate()
  1128  
  1129  	// Create a small enough block chain to download
  1130  	targetBlocks := blockCacheItems - 15
  1131  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1132  
  1133  	// Attempt a full sync with an attacker feeding gapped headers
  1134  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1135  	missing := targetBlocks / 2
  1136  	delete(tester.peerHeaders["attack"], hashes[missing])
  1137  
  1138  	if err := tester.sync("attack", nil, mode); err == nil {
  1139  		t.Fatalf("succeeded attacker synchronisation")
  1140  	}
  1141  	// Synchronise with the valid peer and make sure sync succeeds
  1142  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1143  	if err := tester.sync("valid", nil, mode); err != nil {
  1144  		t.Fatalf("failed to synchronise blocks: %v", err)
  1145  	}
  1146  	assertOwnChain(t, tester, targetBlocks+1)
  1147  }
  1148  
  1149  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1150  // detects the invalid numbering.
  1151  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1152  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1153  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1154  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1155  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1156  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1157  
  1158  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1159  	t.Parallel()
  1160  
  1161  	tester := newTester()
  1162  	defer tester.terminate()
  1163  
  1164  	// Create a small enough block chain to download
  1165  	targetBlocks := blockCacheItems - 15
  1166  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1167  
  1168  	// Attempt a full sync with an attacker feeding shifted headers
  1169  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1170  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1171  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1172  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1173  
  1174  	if err := tester.sync("attack", nil, mode); err == nil {
  1175  		t.Fatalf("succeeded attacker synchronisation")
  1176  	}
  1177  	// Synchronise with the valid peer and make sure sync succeeds
  1178  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1179  	if err := tester.sync("valid", nil, mode); err != nil {
  1180  		t.Fatalf("failed to synchronise blocks: %v", err)
  1181  	}
  1182  	assertOwnChain(t, tester, targetBlocks+1)
  1183  }
  1184  
  1185  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1186  // for various failure scenarios. Afterwards a full sync is attempted to make
  1187  // sure no state was corrupted.
  1188  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1189  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1190  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1191  
  1192  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1193  	t.Parallel()
  1194  
  1195  	tester := newTester()
  1196  	defer tester.terminate()
  1197  
  1198  	// Create a small enough block chain to download
  1199  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1200  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1201  
  1202  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1203  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1204  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1205  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1206  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1207  
  1208  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1209  		t.Fatalf("succeeded fast attacker synchronisation")
  1210  	}
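        	// The rollback discards the unconfirmed fsHeaderSafetyNet headers, so at most the
        	// first MaxHeaderFetch headers accepted before the gap may remain in the chain.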
  1211  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1212  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1213  	}
  1214  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1215  	// This should result in both the last fsHeaderSafetyNet headers being rolled
  1216  	// back and the pivot point being reverted to a non-block status.
  1217  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1218  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1219  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1220  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1221  
  1222  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1223  		t.Fatalf("succeeded block attacker synchronisation")
  1224  	}
  1225  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1226  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1227  	}
  1228  	if mode == FastSync {
  1229  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1230  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1231  		}
  1232  	}
  1233  	// Attempt to sync with an attacker that withholds promised blocks after the
  1234  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1235  	// but already imported pivot block.
  1236  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1237  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1238  
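        	// Once the sync is underway, withdraw all headers from the upper part of the
        	// chain so the peer can no longer deliver the blocks it promised around and
        	// beyond the pivot.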
  1239  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1240  		for i := missing; i <= len(hashes); i++ {
  1241  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1242  		}
  1243  		tester.downloader.syncInitHook = nil
  1244  	}
  1245  
  1246  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1247  		t.Fatalf("succeeded withholding attacker synchronisation")
  1248  	}
  1249  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1250  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1251  	}
  1252  	if mode == FastSync {
  1253  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1254  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1255  		}
  1256  	}
  1257  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1258  	// rollback should also disable fast syncing for this process, verify that we
  1259  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1260  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1261  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1262  	if err := tester.sync("valid", nil, mode); err != nil {
  1263  		t.Fatalf("failed to synchronise blocks: %v", err)
  1264  	}
  1265  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1266  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1267  	}
  1268  	if mode != LightSync {
  1269  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1270  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1271  		}
  1272  	}
  1273  }
  1274  
  1275  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1276  // afterwards by not sending any useful hashes.
  1277  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1278  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1279  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1280  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1281  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1282  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1283  
  1284  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1285  	t.Parallel()
  1286  
  1287  	tester := newTester()
  1288  	defer tester.terminate()
  1289  
  1290  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1291  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1292  
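        	// The attacker advertises an enormous total difficulty but can serve nothing
        	// beyond the genesis block, so the sync must fail with errStallingPeer rather
        	// than stall indefinitely.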
  1293  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1294  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1295  	}
  1296  }
  1297  
  1298  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1299  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1300  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1301  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1302  
  1303  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1304  	t.Parallel()
  1305  
  1306  	// Define the disconnection requirement for individual hash fetch errors
  1307  	tests := []struct {
  1308  		result error
  1309  		drop   bool
  1310  	}{
  1311  		{nil, false},                        // Sync succeeded, all is well
  1312  		{errBusy, false},                    // Sync is already in progress, no problem
  1313  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1314  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1315  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1316  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1317  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1318  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1319  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1320  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1321  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1322  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1323  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1324  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1325  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1326  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1327  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1328  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1329  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1330  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1331  	}
  1332  	// Run the tests and check disconnection status
  1333  	tester := newTester()
  1334  	defer tester.terminate()
  1335  
  1336  	for i, tt := range tests {
  1337  		// Register a new peer and ensure its presence
  1338  		id := fmt.Sprintf("test %d", i)
  1339  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1340  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1341  		}
  1342  		if _, ok := tester.peerHashes[id]; !ok {
  1343  			t.Fatalf("test %d: registered peer not found", i)
  1344  		}
  1345  		// Simulate a synchronisation and check the required result
  1346  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1347  
  1348  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
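        		// The peer must still be registered afterwards unless the simulated error is
        		// one that warrants dropping it.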
  1349  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1350  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1351  		}
  1352  	}
  1353  }
  1354  
  1355  // Tests that synchronisation progress (origin block number, current block number
  1356  // and highest block number) is tracked and updated correctly.
  1357  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1358  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1359  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1360  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1361  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1362  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1363  
  1364  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1365  	t.Parallel()
  1366  
  1367  	tester := newTester()
  1368  	defer tester.terminate()
  1369  
  1370  	// Create a small enough block chain to download
  1371  	targetBlocks := blockCacheItems - 15
  1372  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1373  
  1374  	// Set a sync init hook to catch progress changes
  1375  	starting := make(chan struct{})
  1376  	progress := make(chan struct{})
  1377  
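        	// The hook signals on starting once the downloader has settled on an origin and
        	// target height, then blocks until the test sends on progress, so intermediate
        	// progress values can be inspected deterministically.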
  1378  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1379  		starting <- struct{}{}
  1380  		<-progress
  1381  	}
  1382  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1383  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1384  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1385  	}
  1386  	// Synchronise half the blocks and check initial progress
  1387  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1388  	pending := new(sync.WaitGroup)
  1389  	pending.Add(1)
  1390  
  1391  	go func() {
  1392  		defer pending.Done()
  1393  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1394  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1395  		}
  1396  	}()
  1397  	<-starting
  1398  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1399  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1400  	}
  1401  	progress <- struct{}{}
  1402  	pending.Wait()
  1403  
  1404  	// Synchronise all the blocks and check continuation progress
  1405  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1406  	pending.Add(1)
  1407  
  1408  	go func() {
  1409  		defer pending.Done()
  1410  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1411  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1412  		}
  1413  	}()
  1414  	<-starting
  1415  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1416  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1417  	}
  1418  	progress <- struct{}{}
  1419  	pending.Wait()
  1420  
  1421  	// Check final progress after successful sync
  1422  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1423  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1424  	}
  1425  }
  1426  
  1427  // Tests that synchronisation progress (origin block number and highest block
  1428  // number) is tracked and updated correctly in case of a fork (or manual head
  1429  // reversal).
  1430  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1431  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1432  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1433  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1434  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1435  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1436  
  1437  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1438  	t.Parallel()
  1439  
  1440  	tester := newTester()
  1441  	defer tester.terminate()
  1442  
  1443  	// Create a forked chain to simulate origin reversal
  1444  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1445  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1446  
  1447  	// Set a sync init hook to catch progress changes
  1448  	starting := make(chan struct{})
  1449  	progress := make(chan struct{})
  1450  
  1451  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1452  		starting <- struct{}{}
  1453  		<-progress
  1454  	}
  1455  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1456  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1457  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1458  	}
  1459  	// Synchronise with one of the forks and check progress
  1460  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1461  	pending := new(sync.WaitGroup)
  1462  	pending.Add(1)
  1463  
  1464  	go func() {
  1465  		defer pending.Done()
  1466  		if err := tester.sync("fork A", nil, mode); err != nil {
  1467  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1468  		}
  1469  	}()
  1470  	<-starting
  1471  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1472  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1473  	}
  1474  	progress <- struct{}{}
  1475  	pending.Wait()
  1476  
  1477  	// Simulate a successful sync above the fork
  1478  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1479  
  1480  	// Synchronise with the second fork and check progress resets
  1481  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1482  	pending.Add(1)
  1483  
  1484  	go func() {
  1485  		defer pending.Done()
  1486  		if err := tester.sync("fork B", nil, mode); err != nil {
  1487  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1488  		}
  1489  	}()
  1490  	<-starting
  1491  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1492  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1493  	}
  1494  	progress <- struct{}{}
  1495  	pending.Wait()
  1496  
  1497  	// Check final progress after successful sync
  1498  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1499  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1500  	}
  1501  }
  1502  
  1503  // Tests that if synchronisation is aborted due to some failure, then the progress
  1504  // origin is not updated in the next sync cycle, as it should be considered the
  1505  // continuation of the previous sync and not a new instance.
  1506  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1507  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1508  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1509  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1510  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1511  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1512  
  1513  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1514  	t.Parallel()
  1515  
  1516  	tester := newTester()
  1517  	defer tester.terminate()
  1518  
  1519  	// Create a small enough block chain to download
  1520  	targetBlocks := blockCacheItems - 15
  1521  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1522  
  1523  	// Set a sync init hook to catch progress changes
  1524  	starting := make(chan struct{})
  1525  	progress := make(chan struct{})
  1526  
  1527  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1528  		starting <- struct{}{}
  1529  		<-progress
  1530  	}
  1531  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1532  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1533  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1534  	}
  1535  	// Attempt a full sync with a faulty peer
  1536  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1537  	missing := targetBlocks / 2
  1538  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1539  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1540  	delete(tester.peerReceipts["faulty"], hashes[missing])
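        	// With the data for the middle of the chain removed, the sync is expected to
        	// abort roughly halfway through.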
  1541  
  1542  	pending := new(sync.WaitGroup)
  1543  	pending.Add(1)
  1544  
  1545  	go func() {
  1546  		defer pending.Done()
  1547  		if err := tester.sync("faulty", nil, mode); err == nil {
  1548  			panic("succeeded faulty synchronisation")
  1549  		}
  1550  	}()
  1551  	<-starting
  1552  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1553  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1554  	}
  1555  	progress <- struct{}{}
  1556  	pending.Wait()
  1557  
  1558  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1559  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1560  	pending.Add(1)
  1561  
  1562  	go func() {
  1563  		defer pending.Done()
  1564  		if err := tester.sync("valid", nil, mode); err != nil {
  1565  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1566  		}
  1567  	}()
  1568  	<-starting
  1569  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1570  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1571  	}
  1572  	progress <- struct{}{}
  1573  	pending.Wait()
  1574  
  1575  	// Check final progress after successful sync
  1576  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1577  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1578  	}
  1579  }
  1580  
  1581  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1582  // the progress height is successfully reduced at the next sync invocation.
  1583  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1584  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1585  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1586  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1587  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1588  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1589  
  1590  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1591  	t.Parallel()
  1592  
  1593  	tester := newTester()
  1594  	defer tester.terminate()
  1595  
  1596  	// Create a small block chain
  1597  	targetBlocks := blockCacheItems - 15
  1598  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1599  
  1600  	// Set a sync init hook to catch progress changes
  1601  	starting := make(chan struct{})
  1602  	progress := make(chan struct{})
  1603  
  1604  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1605  		starting <- struct{}{}
  1606  		<-progress
  1607  	}
  1608  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1609  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1610  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1611  	}
  1612  	// Create and sync with an attacker that promises a higher chain than available
  1613  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1614  	for i := 1; i < 3; i++ {
  1615  		delete(tester.peerHeaders["attack"], hashes[i])
  1616  		delete(tester.peerBlocks["attack"], hashes[i])
  1617  		delete(tester.peerReceipts["attack"], hashes[i])
  1618  	}
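        	// The attacker now advertises a chain of targetBlocks+3 blocks but cannot
        	// deliver the ones just below its head, while the honest peer registered later
        	// only serves targetBlocks blocks, so the reported height must shrink accordingly.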
  1619  
  1620  	pending := new(sync.WaitGroup)
  1621  	pending.Add(1)
  1622  
  1623  	go func() {
  1624  		defer pending.Done()
  1625  		if err := tester.sync("attack", nil, mode); err == nil {
  1626  			panic("succeeded attacker synchronisation")
  1627  		}
  1628  	}()
  1629  	<-starting
  1630  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1631  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1632  	}
  1633  	progress <- struct{}{}
  1634  	pending.Wait()
  1635  
  1636  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1637  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1638  	pending.Add(1)
  1639  
  1640  	go func() {
  1641  		defer pending.Done()
  1642  		if err := tester.sync("valid", nil, mode); err != nil {
  1643  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1644  		}
  1645  	}()
  1646  	<-starting
  1647  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1648  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1649  	}
  1650  	progress <- struct{}{}
  1651  	pending.Wait()
  1652  
  1653  	// Check final progress after successful sync
  1654  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1655  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1656  	}
  1657  }
  1658  
  1659  // This test reproduces an issue where unexpected deliveries would
  1660  // block indefinitely if they arrived at the right time.
  1661  // We use data-driven subtests so that the whole set runs in parallel on its own
  1662  // and not concurrently with the other tests, avoiding intermittent failures.
  1663  func TestDeliverHeadersHang(t *testing.T) {
  1664  	testCases := []struct {
  1665  		protocol int
  1666  		syncMode SyncMode
  1667  	}{
  1668  		{62, FullSync},
  1669  		{63, FullSync},
  1670  		{63, FastSync},
  1671  		{64, FullSync},
  1672  		{64, FastSync},
  1673  		{64, LightSync},
  1674  	}
  1675  	for _, tc := range testCases {
  1676  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1677  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1678  		})
  1679  	}
  1680  }
  1681  
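        // floodingTestPeer wraps a regular test peer and, on every header request, floods
        // the downloader with a large number of unsolicited header deliveries from fake
        // peers to make sure none of them can block the sync.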
  1682  type floodingTestPeer struct {
  1683  	peer   Peer
  1684  	tester *downloadTester
  1685  	pend   sync.WaitGroup
  1686  }
  1687  
  1688  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1689  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1690  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1691  }
  1692  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1693  	return ftp.peer.RequestBodies(hashes)
  1694  }
  1695  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1696  	return ftp.peer.RequestReceipts(hashes)
  1697  }
  1698  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1699  	return ftp.peer.RequestNodeData(hashes)
  1700  }
  1701  
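        // RequestHeadersByNumber spawns 500 fake peers that each deliver a small batch of
        // bogus headers concurrently, forwards the real request, and then verifies that
        // every unexpected delivery returns within the timeout instead of blocking.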
  1702  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1703  	deliveriesDone := make(chan struct{}, 500)
  1704  	for i := 0; i < cap(deliveriesDone); i++ {
  1705  		peer := fmt.Sprintf("fake-peer%d", i)
  1706  		ftp.pend.Add(1)
  1707  
  1708  		go func() {
  1709  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1710  			deliveriesDone <- struct{}{}
  1711  			ftp.pend.Done()
  1712  		}()
  1713  	}
  1714  	// Deliver the actual requested headers.
  1715  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1716  	// None of the extra deliveries should block.
  1717  	timeout := time.After(60 * time.Second)
  1718  	for i := 0; i < cap(deliveriesDone); i++ {
  1719  		select {
  1720  		case <-deliveriesDone:
  1721  		case <-timeout:
  1722  			panic("blocked")
  1723  		}
  1724  	}
  1725  	return nil
  1726  }
  1727  
  1728  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1729  	t.Parallel()
  1730  
  1731  	master := newTester()
  1732  	defer master.terminate()
  1733  
  1734  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1735  	for i := 0; i < 200; i++ {
  1736  		tester := newTester()
  1737  		tester.peerDb = master.peerDb
  1738  
  1739  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1740  		// Whenever the downloader requests headers, flood it with
  1741  		// a lot of unrequested header deliveries.
  1742  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1743  			peer:   tester.downloader.peers.peers["peer"].peer,
  1744  			tester: tester,
  1745  		}
  1746  		if err := tester.sync("peer", nil, mode); err != nil {
  1747  			t.Errorf("test %d: sync failed: %v", i, err)
  1748  		}
  1749  		tester.terminate()
  1750  
  1751  		// Flush all goroutines to prevent messing with subsequent tests
  1752  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1753  	}
  1754  }