github.com/klaytn/klaytn@v1.12.1/datasync/downloader/downloader_test.go

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/downloader/downloader_test.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package downloader
    22  
    23  import (
    24  	"bytes"
    25  	"encoding/json"
    26  	"errors"
    27  	"fmt"
    28  	"math/big"
    29  	"strings"
    30  	"sync"
    31  	"sync/atomic"
    32  	"testing"
    33  	"time"
    34  
    35  	"github.com/klaytn/klaytn/blockchain"
    36  	"github.com/klaytn/klaytn/blockchain/types"
    37  	"github.com/klaytn/klaytn/common"
    38  	"github.com/klaytn/klaytn/consensus/gxhash"
    39  	"github.com/klaytn/klaytn/consensus/istanbul"
    40  	"github.com/klaytn/klaytn/crypto"
    41  	"github.com/klaytn/klaytn/event"
    42  	"github.com/klaytn/klaytn/log"
    43  	"github.com/klaytn/klaytn/params"
    44  	"github.com/klaytn/klaytn/reward"
    45  	"github.com/klaytn/klaytn/snapshot"
    46  	"github.com/klaytn/klaytn/storage/database"
    47  	"github.com/klaytn/klaytn/storage/statedb"
    48  )
    49  
    50  // Reduce some of the parameters to make the tester faster.
    51  func init() {
    52  	MaxForkAncestry = uint64(10000)
    53  	blockCacheMaxItems = 1024
    54  	fsHeaderContCheck = 500 * time.Millisecond
    55  }
    56  
    57  var (
    58  	lock                      sync.Mutex
    59  	setter                    *govSetter
    60  	testStakingUpdateInterval = uint64(4)
    61  )
    62  
    63  // govSetter sets governance items for testing purposes
    64  type govSetter struct {
    65  	numTesting          uint32
    66  	origStakingInterval uint64
    67  	origStakingManager  *reward.StakingManager
    68  }
    69  
    70  // setTestGovernance sets the staking manager to use an in-memory DB and the staking update interval to 4.
    71  func setTestGovernance(db database.DBManager) {
    72  	lock.Lock()
    73  	defer lock.Unlock()
    74  	if setter == nil {
    75  		setter = &govSetter{
    76  			numTesting:          0,
    77  			origStakingInterval: params.StakingUpdateInterval(),
    78  			origStakingManager:  reward.GetStakingManager(),
    79  		}
    80  
    81  		reward.SetTestStakingManagerWithDB(db)
    82  		params.SetStakingUpdateInterval(testStakingUpdateInterval)
    83  	}
    84  	setter.numTesting += 1
    85  }
    86  
    87  // rollbackOrigGovernance restores the original staking manager as well as the staking update interval.
    88  func rollbackOrigGovernance() {
    89  	lock.Lock()
    90  	defer lock.Unlock()
    91  	setter.numTesting -= 1
    92  	if setter.numTesting == 0 {
    93  		reward.SetTestStakingManager(setter.origStakingManager)
    94  		params.SetStakingUpdateInterval(setter.origStakingInterval)
    95  
    96  		setter = nil
    97  	}
    98  }
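
// Taken together, setTestGovernance and rollbackOrigGovernance implement a
// reference-counted global fixture: the first caller swaps in the in-memory
// staking manager, and only the last caller restores the original state. A
// minimal usage sketch (a hypothetical test, relying only on the helpers in
// this file):
//
//	func TestWithGovernanceFixture(t *testing.T) {
//		db := database.NewMemoryDBManager()
//		setTestGovernance(db)          // numTesting: 0 -> 1, swaps the globals
//		defer rollbackOrigGovernance() // numTesting: 1 -> 0, restores the globals
//		// ... code observing params.StakingUpdateInterval() == 4 ...
//	}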
    99  
   100  // downloadTester is a test simulator for mocking out a local block chain.
   101  type downloadTester struct {
   102  	downloader *Downloader
   103  
   104  	genesis *types.Block       // Genesis block used by the tester and peers
   105  	stateDb database.DBManager // Database used by the tester for syncing from peers
   106  	peerDb  database.DBManager // Database of the peers containing all data
   107  
   108  	ownHashes      []common.Hash                       // Hash chain belonging to the tester
   109  	ownHeaders     map[common.Hash]*types.Header       // Headers belonging to the tester
   110  	ownBlocks      map[common.Hash]*types.Block        // Blocks belonging to the tester
   111  	ownReceipts    map[common.Hash]types.Receipts      // Receipts belonging to the tester
   112  	ownStakingInfo map[common.Hash]*reward.StakingInfo // Staking info belonging to the tester
   113  	ownChainTd     map[common.Hash]*big.Int            // Total difficulties of the blocks in the local chain
   114  
   115  	peerHashes       map[string][]common.Hash                       // Hash chain belonging to different test peers
   116  	peerHeaders      map[string]map[common.Hash]*types.Header       // Headers belonging to different test peers
   117  	peerBlocks       map[string]map[common.Hash]*types.Block        // Blocks belonging to different test peers
   118  	peerReceipts     map[string]map[common.Hash]types.Receipts      // Receipts belonging to different test peers
   119  	peerStakingInfos map[string]map[common.Hash]*reward.StakingInfo // StakingInfo belonging to different test peers
   120  	peerChainTds     map[string]map[common.Hash]*big.Int            // Total difficulties of the blocks in the peer chains
   121  
   122  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
   123  
   124  	lock sync.RWMutex
   125  }
   126  
   127  // newTester creates a new downloader test mocker.
   128  func newTester() *downloadTester {
   129  	remotedb := database.NewMemoryDBManager()
   130  	localdb := database.NewMemoryDBManager()
   131  	genesis := blockchain.GenesisBlockForTesting(remotedb, testAddress, big.NewInt(1000000000))
   132  	setTestGovernance(localdb)
   133  
   134  	tester := &downloadTester{
   135  		genesis:           genesis,
   136  		peerDb:            remotedb,
   137  		ownHashes:         []common.Hash{genesis.Hash()},
   138  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
   139  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
   140  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
   141  		ownStakingInfo:    map[common.Hash]*reward.StakingInfo{},
   142  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.BlockScore()},
   143  		peerHashes:        make(map[string][]common.Hash),
   144  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
   145  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
   146  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
   147  		peerStakingInfos:  make(map[string]map[common.Hash]*reward.StakingInfo),
   148  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
   149  		peerMissingStates: make(map[string]map[common.Hash]bool),
   150  	}
   151  	tester.stateDb = localdb
   152  	tester.stateDb.WriteTrieNode(genesis.Root().ExtendZero(), []byte{0x00})
   153  
   154  	tester.downloader = New(FullSync, tester.stateDb, statedb.NewSyncBloom(1, tester.stateDb.GetMemDB()), new(event.TypeMux), tester, nil, tester.dropPeer, uint64(istanbul.WeightedRandom))
   155  
   156  	return tester
   157  }
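
// Note that every newTester call acquires the governance fixture above, so a
// tester must always be paired with terminate to release it, as the tests
// below do:
//
//	tester := newTester()
//	defer tester.terminate()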
   158  
   159  func (dl *downloadTester) makeStakingInfoData(blockNumber uint64) (*reward.StakingInfo, []byte) {
   160  	k, _ := crypto.GenerateKey()
   161  	addr := crypto.PubkeyToAddress(k.PublicKey)
   162  	si := &reward.StakingInfo{
   163  		BlockNum: blockNumber,
   164  		KCFAddr:  addr, // assign KCF in order to make the staking information unique
   165  	}
   166  	siBytes, _ := json.Marshal(si)
   167  	return si, siBytes
   168  }
   169  
   170  // makeChain creates a chain of n blocks starting at and including parent.
   171  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   172  // contains a transaction to allow testing correct block reassembly.
   173  // On every 4th block, staking information is updated to allow testing staking info
   174  // downloading as well.
   175  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]*reward.StakingInfo) {
   176  	stakingUpdatedBlocks := make(map[uint64]*reward.StakingInfo)
   177  	// Generate the block chain
   178  	blocks, receipts := blockchain.GenerateChain(params.TestChainConfig, parent, gxhash.NewFaker(), dl.peerDb, n, func(i int, block *blockchain.BlockGen) {
   179  		block.SetRewardbase(common.Address{seed})
   180  		// If a heavy chain is requested, delay blocks to raise blockscore
   181  		if heavy {
   182  			block.OffsetTime(-1)
   183  		}
   184  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   185  		if parent == dl.genesis && i%3 == 0 {
   186  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   187  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   188  			if err != nil {
   189  				panic(err)
   190  			}
   191  			block.AddTx(tx)
   192  		}
   193  
   194  		blockNum := block.Number().Uint64()
   195  		if blockNum%testStakingUpdateInterval == 0 {
   196  			si, siBytes := dl.makeStakingInfoData(blockNum)
   197  			stakingUpdatedBlocks[blockNum] = si
   198  			dl.peerDb.WriteStakingInfo(blockNum, siBytes)
   199  		}
   200  	})
   201  	// Convert the block-chain into a hash-chain and header/block maps
   202  	hashes := make([]common.Hash, n+1)
   203  	hashes[len(hashes)-1] = parent.Hash()
   204  
   205  	headerm := make(map[common.Hash]*types.Header, n+1)
   206  	headerm[parent.Hash()] = parent.Header()
   207  
   208  	blockm := make(map[common.Hash]*types.Block, n+1)
   209  	blockm[parent.Hash()] = parent
   210  
   211  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   212  	receiptm[parent.Hash()] = parentReceipts
   213  
   214  	stakingInfom := make(map[common.Hash]*reward.StakingInfo)
   215  	if parent.NumberU64()%testStakingUpdateInterval == 0 {
   216  		si, siBytes := dl.makeStakingInfoData(parent.NumberU64())
   217  		stakingInfom[parent.Hash()] = si
   218  		dl.peerDb.WriteStakingInfo(parent.NumberU64(), siBytes)
   219  	}
   220  
   221  	for i, b := range blocks {
   222  		hashes[len(hashes)-i-2] = b.Hash()
   223  		headerm[b.Hash()] = b.Header()
   224  		blockm[b.Hash()] = b
   225  		receiptm[b.Hash()] = receipts[i]
   226  		if b.NumberU64()%testStakingUpdateInterval == 0 {
   227  			stakingInfom[b.Hash()] = stakingUpdatedBlocks[b.NumberU64()]
   228  		}
   229  	}
   230  	return hashes, headerm, blockm, receiptm, stakingInfom
   231  }
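
// To make the head->parent ordering concrete: makeChain(3, 0, genesis, nil,
// false), building blocks b1..b3 on top of genesis, returns
//
//	hashes[0] == b3.Hash()      // chain head
//	hashes[1] == b2.Hash()
//	hashes[2] == b1.Hash()
//	hashes[3] == genesis.Hash() // the supplied parent
//
// This ordering is what newSlowPeer and the header fetchers below rely on.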
   232  
   233  // makeChainFork creates two chains of length n, such that h1[:f] and
   234  // h2[:f] are different but have a common suffix of length n-f.
   235  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts, map[common.Hash]*reward.StakingInfo, map[common.Hash]*reward.StakingInfo) {
   236  	// Create the common suffix
   237  	hashes, headers, blocks, receipts, stakingInfos := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   238  
   239  	// Create the forks, making the second heavier if non-balanced forks were requested
   240  	hashes1, headers1, blocks1, receipts1, stakingInfos1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   241  	hashes1 = append(hashes1, hashes[1:]...)
   242  
   243  	heavy := !balanced
   247  	hashes2, headers2, blocks2, receipts2, stakingInfos2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   248  	hashes2 = append(hashes2, hashes[1:]...)
   249  
   250  	for hash, header := range headers {
   251  		headers1[hash] = header
   252  		headers2[hash] = header
   253  	}
   254  	for hash, block := range blocks {
   255  		blocks1[hash] = block
   256  		blocks2[hash] = block
   257  	}
   258  	for hash, receipt := range receipts {
   259  		receipts1[hash] = receipt
   260  		receipts2[hash] = receipt
   261  	}
   262  	for hash, stakingInfo := range stakingInfos {
   263  		stakingInfos1[hash] = stakingInfo
   264  		stakingInfos2[hash] = stakingInfo
   265  	}
   266  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2, stakingInfos1, stakingInfos2
   267  }
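
// As a worked example, makeChainFork(10, 4, genesis, nil, true) builds a
// 6-block common prefix on top of genesis and two distinct 4-block forks
// (seeds 1 and 2) on top of that prefix, so both returned hash chains have
// length 11 (10 blocks plus genesis), hashes1[:4] and hashes2[:4] (the fork
// heads) differ, and hashes1[4:] equals hashes2[4:] (the shared prefix plus
// genesis).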
   268  
   269  // terminate aborts any operations on the embedded downloader and releases all
   270  // held resources.
   271  func (dl *downloadTester) terminate() {
   272  	dl.downloader.Terminate()
   273  	rollbackOrigGovernance()
   274  }
   275  
   276  // sync starts synchronizing with a remote peer, blocking until it completes.
   277  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   278  	dl.lock.RLock()
   279  	hash := dl.peerHashes[id][0]
   280  	// If no particular TD was requested, load from the peer's blockchain
   281  	if td == nil {
   282  		td = big.NewInt(1)
   283  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   284  			td = diff
   285  		}
   286  	}
   287  	dl.lock.RUnlock()
   288  
   289  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   290  	err := dl.downloader.synchronise(id, hash, td, mode)
   291  	select {
   292  	case <-dl.downloader.cancelCh:
   293  		// Ok, downloader fully cancelled after sync cycle
   294  	default:
   295  		// Downloader is still accepting packets, can block a peer up
   296  		panic("downloader active post sync cycle") // panic will be caught by tester
   297  	}
   298  	return err
   299  }
   300  
   301  func (dl *downloadTester) syncStakingInfos(id string, from, to uint64) error {
   302  	return dl.downloader.SyncStakingInfo(id, from, to)
   303  }
   304  
   305  // HasHeader checks if a header is present in the tester's canonical chain.
   306  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   307  	return dl.GetHeaderByHash(hash) != nil
   308  }
   309  
   310  // HasBlock checks if a block is present in the tester's canonical chain.
   311  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   312  	return dl.GetBlockByHash(hash) != nil
   313  }
   314  
   315  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   316  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   317  	dl.lock.RLock()
   318  	defer dl.lock.RUnlock()
   319  
   320  	return dl.ownHeaders[hash]
   321  }
   322  
   323  // GetBlockByHash retrieves a block from the tester's canonical chain.
   324  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   325  	dl.lock.RLock()
   326  	defer dl.lock.RUnlock()
   327  
   328  	return dl.ownBlocks[hash]
   329  }
   330  
   331  // CurrentHeader retrieves the current head header from the canonical chain.
   332  func (dl *downloadTester) CurrentHeader() *types.Header {
   333  	dl.lock.RLock()
   334  	defer dl.lock.RUnlock()
   335  
   336  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   337  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   338  			return header
   339  		}
   340  	}
   341  	return dl.genesis.Header()
   342  }
   343  
   344  // CurrentBlock retrieves the current head block from the canonical chain.
   345  func (dl *downloadTester) CurrentBlock() *types.Block {
   346  	dl.lock.RLock()
   347  	defer dl.lock.RUnlock()
   348  
   349  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   350  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   351  			if has, _ := dl.stateDb.HasTrieNode(block.Root().ExtendZero()); has {
   352  				return block
   353  			}
   354  		}
   355  	}
   356  	return dl.genesis
   357  }
   358  
   359  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   360  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   361  	dl.lock.RLock()
   362  	defer dl.lock.RUnlock()
   363  
   364  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   365  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   366  			return block
   367  		}
   368  	}
   369  	return dl.genesis
   370  }
   371  
   372  // FastSyncCommitHead manually sets the head block to a given hash.
   373  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   374  	// For now only check that the state trie is correct
   375  	if block := dl.GetBlockByHash(hash); block != nil {
   376  		_, err := statedb.NewSecureTrie(block.Root(), statedb.NewDatabase(dl.stateDb), nil)
   377  		return err
   378  	}
   379  	return fmt.Errorf("non existent block: %x", hash[:4])
   380  }
   381  
   382  // GetTd retrieves the block's total blockscore from the canonical chain.
   383  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   384  	dl.lock.RLock()
   385  	defer dl.lock.RUnlock()
   386  
   387  	return dl.ownChainTd[hash]
   388  }
   389  
   390  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   391  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   392  	dl.lock.Lock()
   393  	defer dl.lock.Unlock()
   394  
   395  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   396  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   397  		return 0, errors.New("InsertHeaderChain: unknown parent at first position")
   398  	}
   399  	var hashes []common.Hash
   400  	for i := 1; i < len(headers); i++ {
   401  		hash := headers[i-1].Hash()
   402  		if headers[i].ParentHash != hash {
   403  			return i, fmt.Errorf("non-contiguous import at position %d", i)
   404  		}
   405  		hashes = append(hashes, hash)
   406  	}
   407  	hashes = append(hashes, headers[len(headers)-1].Hash())
   408  	// Do a full insert if pre-checks passed
   409  	for i, header := range headers {
   410  		hash := hashes[i]
   411  		if _, ok := dl.ownHeaders[hash]; ok {
   412  			continue
   413  		}
   414  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   415  			// This _should_ be impossible, due to precheck and induction
   416  			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
   417  		}
   418  		dl.ownHashes = append(dl.ownHashes, hash)
   419  		dl.ownHeaders[hash] = header
   420  		dl.ownChainTd[hash] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.BlockScore)
   421  	}
   422  	return len(headers), nil
   423  }
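
// The contiguity pre-check above makes a gapped batch fail atomically before
// anything is written. A sketch of the expected behaviour, with hypothetical
// headers h1, h2, h4 where h4's parent is not h2:
//
//	n, err := dl.InsertHeaderChain([]*types.Header{h1, h2, h4}, 0)
//	// n == 2, err reports "non-contiguous import at position 2",
//	// and no header of the batch has been inserted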
   424  
   425  // InsertChain injects a new batch of blocks into the simulated chain.
   426  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   427  	dl.lock.Lock()
   428  	defer dl.lock.Unlock()
   429  
   430  	for i, block := range blocks {
   431  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   432  			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
   433  		} else if has, _ := dl.stateDb.HasTrieNode(parent.Root().ExtendZero()); !has {
   434  			return i, fmt.Errorf("InsertChain: unknown parent state %x", parent.Root())
   435  		}
   436  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   437  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   438  			dl.ownHeaders[block.Hash()] = block.Header()
   439  		}
   440  		dl.ownBlocks[block.Hash()] = block
   441  		dl.stateDb.WriteTrieNode(block.Root().ExtendZero(), []byte{0x00})
   442  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.BlockScore())
   443  	}
   444  	return len(blocks), nil
   445  }
   446  
   447  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   448  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   449  	dl.lock.Lock()
   450  	defer dl.lock.Unlock()
   451  
   452  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   453  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   454  			return i, errors.New("unknown owner")
   455  		}
   456  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   457  			return i, errors.New("InsertReceiptChain: unknown parent")
   458  		}
   459  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   460  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   461  
   462  		siBytes, _ := dl.peerDb.ReadStakingInfo(blocks[i].NumberU64())
   463  		if siBytes != nil {
   464  			stakingInfo := new(reward.StakingInfo)
   465  			json.Unmarshal(siBytes, stakingInfo)
   466  			dl.ownStakingInfo[blocks[i].Hash()] = stakingInfo
   467  		}
   468  	}
   469  	return len(blocks), nil
   470  }
   471  
   472  // Snapshots returns the blockchain snapshot tree.
   473  func (dl *downloadTester) Snapshots() *snapshot.Tree {
   474  	return nil
   475  }
   476  
   477  // Rollback removes some recently added elements from the chain.
   478  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   479  	dl.lock.Lock()
   480  	defer dl.lock.Unlock()
   481  
   482  	for i := len(hashes) - 1; i >= 0; i-- {
   483  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   484  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   485  		}
   486  		delete(dl.ownChainTd, hashes[i])
   487  		delete(dl.ownHeaders, hashes[i])
   488  		delete(dl.ownReceipts, hashes[i])
   489  		delete(dl.ownBlocks, hashes[i])
   490  		delete(dl.ownStakingInfo, hashes[i])
   491  	}
   492  }
   493  
   494  // newPeer registers a new block download source into the downloader.
   495  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, stakingInfos map[common.Hash]*reward.StakingInfo) error {
   496  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, stakingInfos, 0)
   497  }
   498  
   499  // newSlowPeer registers a new block download source into the downloader, with a
   500  // specific delay time on processing the network packets sent to it, simulating
   501  // potentially slow network IO.
   502  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, stakingInfos map[common.Hash]*reward.StakingInfo, delay time.Duration) error {
   503  	dl.lock.Lock()
   504  	defer dl.lock.Unlock()
   505  
   506  	err := dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   507  	if err == nil {
   508  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   509  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   510  		copy(dl.peerHashes[id], hashes)
   511  
   512  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   513  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   514  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   515  		dl.peerStakingInfos[id] = make(map[common.Hash]*reward.StakingInfo)
   516  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   517  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   518  
   519  		genesis := hashes[len(hashes)-1]
   520  		if header := headers[genesis]; header != nil {
   521  			dl.peerHeaders[id][genesis] = header
   522  			dl.peerChainTds[id][genesis] = header.BlockScore
   523  		}
   524  		if block := blocks[genesis]; block != nil {
   525  			dl.peerBlocks[id][genesis] = block
   526  			dl.peerChainTds[id][genesis] = block.BlockScore()
   527  		}
   528  		if stakingInfo := stakingInfos[genesis]; stakingInfo != nil {
   529  			dl.peerStakingInfos[id][genesis] = stakingInfo
   530  		}
   531  
   532  		for i := len(hashes) - 2; i >= 0; i-- {
   533  			hash := hashes[i]
   534  
   535  			if header, ok := headers[hash]; ok {
   536  				dl.peerHeaders[id][hash] = header
   537  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   538  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.BlockScore, dl.peerChainTds[id][header.ParentHash])
   539  				}
   540  			}
   541  			if block, ok := blocks[hash]; ok {
   542  				dl.peerBlocks[id][hash] = block
   543  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   544  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.BlockScore(), dl.peerChainTds[id][block.ParentHash()])
   545  				}
   546  			}
   547  			if receipt, ok := receipts[hash]; ok {
   548  				dl.peerReceipts[id][hash] = receipt
   549  			}
   550  
   551  			if stakingInfo, ok := stakingInfos[hash]; ok {
   552  				dl.peerStakingInfos[id][hash] = stakingInfo
   553  			}
   554  		}
   555  	}
   556  	return err
   557  }
   558  
   559  // dropPeer simulates a hard peer removal from the connection pool.
   560  func (dl *downloadTester) dropPeer(id string) {
   561  	dl.lock.Lock()
   562  	defer dl.lock.Unlock()
   563  
   564  	delete(dl.peerHashes, id)
   565  	delete(dl.peerHeaders, id)
   566  	delete(dl.peerBlocks, id)
   567  	delete(dl.peerChainTds, id)
   568  
   569  	dl.downloader.UnregisterPeer(id)
   570  }
   571  
   572  type downloadTesterPeer struct {
   573  	dl    *downloadTester
   574  	id    string
   575  	delay time.Duration
   576  	lock  sync.RWMutex
   577  }
   578  
   579  // setDelay is a thread safe setter for the network delay value.
   580  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   581  	dlp.lock.Lock()
   582  	defer dlp.lock.Unlock()
   583  
   584  	dlp.delay = delay
   585  }
   586  
   587  // waitDelay is a thread safe way to sleep for the configured time.
   588  func (dlp *downloadTesterPeer) waitDelay() {
   589  	dlp.lock.RLock()
   590  	delay := dlp.delay
   591  	dlp.lock.RUnlock()
   592  
   593  	time.Sleep(delay)
   594  }
   595  
   596  // Head retrieves the peer's current head hash and total blockscore; the
   597  // tester does not track per-head blockscores, so nil is returned for the TD.
   598  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   599  	dlp.dl.lock.RLock()
   600  	defer dlp.dl.lock.RUnlock()
   601  
   602  	return dlp.dl.peerHashes[dlp.id][0], nil
   603  }
   604  
   605  // RequestHeadersByHash simulates a GetBlockHeaders request for a hashed
   606  // origin: the hash is resolved to its canonical number on the peer's chain
   607  // and the query is delegated to the absolute header fetcher below.
   608  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   609  	// Find the canonical number of the hash
   610  	dlp.dl.lock.RLock()
   611  	number := uint64(0)
   612  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   613  		if hash == origin {
   614  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   615  			break
   616  		}
   617  	}
   618  	dlp.dl.lock.RUnlock()
   619  
   620  	// Use the absolute header fetcher to satisfy the query
   621  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   622  }
   623  
   624  // RequestHeadersByNumber simulates a GetBlockHeaders request for a numbered
   625  // origin, gathering the matching headers from the peer's canned chain and
   626  // delivering them to the downloader asynchronously.
   627  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   628  	dlp.waitDelay()
   629  
   630  	dlp.dl.lock.RLock()
   631  	defer dlp.dl.lock.RUnlock()
   632  
   633  	// Gather the next batch of headers
   634  	hashes := dlp.dl.peerHashes[dlp.id]
   635  	headers := dlp.dl.peerHeaders[dlp.id]
   636  	result := make([]*types.Header, 0, amount)
   637  	if reverse {
   638  		for i := 0; i < amount && 0 <= int(origin)-i*(skip+1); i++ {
   639  			if header, ok := headers[hashes[len(hashes)-1-int(origin)+i*(skip+1)]]; ok {
   640  				result = append(result, header)
   641  			}
   642  		}
   643  	} else {
   644  		for i := 0; i < amount && len(hashes)-1-int(origin)-i*(skip+1) >= 0; i++ {
   645  			if header, ok := headers[hashes[len(hashes)-1-int(origin)-i*(skip+1)]]; ok {
   646  				result = append(result, header)
   647  			}
   648  		}
   649  	}
   650  	// Delay delivery a bit to allow attacks to unfold
   651  	go func() {
   652  		time.Sleep(time.Millisecond)
   653  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   654  	}()
   655  	return nil
   656  }
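
// Since the canned hash chain is stored head->genesis, block number o lives
// at index len(hashes)-1-o, so a forward query walks the slice backwards.
// For example, with len(hashes) == 8 (7 blocks plus genesis),
// RequestHeadersByNumber(2, 3, 0, false) delivers the headers at
//
//	hashes[5] // block 2
//	hashes[4] // block 3
//	hashes[3] // block 4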
   657  
   658  // RequestBodies simulates a getBlockBodies request, delivering the
   659  // transaction lists of the requested blocks from the peer's canned chain
   660  // to the downloader asynchronously.
   661  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   662  	dlp.waitDelay()
   663  
   664  	dlp.dl.lock.RLock()
   665  	defer dlp.dl.lock.RUnlock()
   666  
   667  	blocks := dlp.dl.peerBlocks[dlp.id]
   668  
   669  	transactions := make([][]*types.Transaction, 0, len(hashes))
   670  
   671  	for _, hash := range hashes {
   672  		if block, ok := blocks[hash]; ok {
   673  			transactions = append(transactions, block.Transactions())
   674  		}
   675  	}
   676  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions)
   677  
   678  	return nil
   679  }
   680  
   681  // RequestReceipts simulates a getReceipts request, delivering the receipts
   682  // of the requested blocks from the peer's canned chain to the downloader
   683  // asynchronously.
   684  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   685  	dlp.waitDelay()
   686  
   687  	dlp.dl.lock.RLock()
   688  	defer dlp.dl.lock.RUnlock()
   689  
   690  	receipts := dlp.dl.peerReceipts[dlp.id]
   691  
   692  	results := make([][]*types.Receipt, 0, len(hashes))
   693  	for _, hash := range hashes {
   694  		if receipt, ok := receipts[hash]; ok {
   695  			results = append(results, receipt)
   696  		}
   697  	}
   698  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   699  
   700  	return nil
   701  }
   702  
   703  // RequestStakingInfo simulates a getStakingInfo request, delivering the
   704  // staking information of the requested blocks from the peer's canned chain
   705  // to the downloader asynchronously.
   706  func (dlp *downloadTesterPeer) RequestStakingInfo(hashes []common.Hash) error {
   707  	dlp.waitDelay()
   708  
   709  	dlp.dl.lock.RLock()
   710  	defer dlp.dl.lock.RUnlock()
   711  
   712  	stakingInfos := dlp.dl.peerStakingInfos[dlp.id]
   713  
   714  	results := []*reward.StakingInfo{}
   715  
   716  	for _, hash := range hashes {
   717  		if stakingInfo, ok := stakingInfos[hash]; ok {
   718  			results = append(results, stakingInfo)
   719  		}
   720  	}
   721  	go dlp.dl.downloader.DeliverStakingInfos(dlp.id, results)
   722  
   723  	return nil
   724  }
   725  
   726  // RequestNodeData simulates a getNodeData request, delivering the requested
   727  // trie nodes from the peer database (minus any entries marked as missing)
   728  // to the downloader asynchronously.
   729  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   730  	dlp.waitDelay()
   731  
   732  	dlp.dl.lock.RLock()
   733  	defer dlp.dl.lock.RUnlock()
   734  
   735  	results := make([][]byte, 0, len(hashes))
   736  	for _, hash := range hashes {
   737  		if data, err := dlp.dl.peerDb.ReadTrieNode(hash.ExtendZero()); err == nil {
   738  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   739  				results = append(results, data)
   740  			}
   741  		}
   742  	}
   743  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   744  
   745  	return nil
   746  }
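
// peerMissingStates lets a test simulate a peer with incomplete state: any
// trie node flagged there is silently withheld from the response. A sketch,
// flagging a hypothetical state root after the peer has been registered:
//
//	tester.peerMissingStates["peer"][block.Root()] = true // peer omits this node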
   747  
   748  // assertOwnChain checks if the local chain contains the correct number of items
   749  // of the various chain components.
   750  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   751  	assertOwnForkedChain(t, tester, 1, []int{length})
   752  }
   753  
   754  // assertOwnForkedChain checks if the local forked chain contains the correct
   755  // number of items of the various chain components.
   756  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   757  	// Initialize the counters for the first fork
   758  	headers, blocks, receipts, stakingInfos := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks, lengths[0]-fsMinFullBlocks
   759  
   760  	if receipts < 0 {
   761  		receipts = 1
   762  	}
   763  	// Update the counters for each subsequent fork
   764  	for _, length := range lengths[1:] {
   765  		headers += length - common
   766  		blocks += length - common
   767  		receipts += length - common - fsMinFullBlocks
   768  		stakingInfos += length - common - fsMinFullBlocks
   769  	}
   770  	stakingInfos = stakingInfos / int(testStakingUpdateInterval) // assuming that staking information update interval is 4
   771  	switch tester.downloader.getMode() {
   772  	case FullSync:
   773  		receipts, stakingInfos = 1, 0
   774  	case LightSync:
   775  		blocks, receipts, stakingInfos = 1, 1, 0
   776  	}
   777  	if hs := len(tester.ownHeaders); hs != headers {
   778  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   779  	}
   780  	if bs := len(tester.ownBlocks); bs != blocks {
   781  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   782  	}
   783  	if rs := len(tester.ownReceipts); rs != receipts {
   784  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   785  	}
   786  	if ss := len(tester.ownStakingInfo); ss != stakingInfos {
   787  		t.Fatalf("synchronised stakingInfos mismatch: have %v, want %v", ss, stakingInfos)
   788  	}
   789  	// Verify the state trie too for fast syncs
   790  	/*if tester.downloader.mode == FastSync {
   791  		pivot := uint64(0)
   792  		var index int
   793  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   794  			index = pivot
   795  		} else {
   796  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   797  		}
   798  		if index > 0 {
   799  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   800  				t.Fatalf("state reconstruction failed: %v", err)
   801  			}
   802  		}
   803  	}*/
   804  }
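
// A worked example for a single FastSync fork, taking fsMinFullBlocks = 64
// purely for illustration: lengths == []int{193} yields
//
//	headers      == 193
//	blocks       == 193
//	receipts     == 193 - 64       == 129
//	stakingInfos == (193 - 64) / 4 == 32
//
// FullSync collapses receipts/stakingInfos to 1/0, and LightSync further
// collapses blocks to 1.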
   805  
   806  // Tests that simple synchronisation against a canonical chain works correctly.
   807  // In this test, common ancestor lookup should be short-circuited and not require
   808  // binary searching.
   809  func TestCanonicalSynchronisation62(t *testing.T)     { testCanonicalSynchronisation(t, 62, FullSync) }
   810  func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
   811  func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
   812  func TestCanonicalSynchronisation64Light(t *testing.T) {
   813  	testCanonicalSynchronisation(t, 64, LightSync)
   814  }
   815  func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonicalSynchronisation(t, 65, FullSync) }
   816  func TestCanonicalSynchronisation65Fast(t *testing.T) { testCanonicalSynchronisation(t, 65, FastSync) }
   817  
   818  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   819  	t.Parallel()
   820  
   821  	tester := newTester()
   822  	defer tester.terminate()
   823  
   824  	// Create a small enough block chain to download
   825  	targetBlocks := blockCacheMaxItems - 15
   826  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   827  
   828  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, stakingInfos)
   829  
   830  	// Synchronise with the peer and make sure all relevant data was retrieved
   831  	if err := tester.sync("peer", nil, mode); err != nil {
   832  		t.Fatalf("failed to synchronise blocks: %v", err)
   833  	}
   834  	assertOwnChain(t, tester, targetBlocks+1)
   835  }
   836  
   837  // Tests that if a large batch of blocks is being downloaded, it is throttled
   838  // until the cached blocks are retrieved.
   839  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   840  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   841  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   842  func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }
   843  func TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) }
   844  
   845  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   846  	t.Parallel()
   847  	tester := newTester()
   848  	defer tester.terminate()
   849  
   850  	// Create a long block chain to download
   851  	targetBlocks := 8 * blockCacheMaxItems
   852  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   853  
   854  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, stakingInfos)
   855  
   856  	// Wrap the importer to allow stepping
   857  	blocked, proceed := uint32(0), make(chan struct{})
   858  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   859  		atomic.StoreUint32(&blocked, uint32(len(results)))
   860  		<-proceed
   861  	}
   862  	// Start a synchronisation concurrently
   863  	errc := make(chan error)
   864  	go func() {
   865  		errc <- tester.sync("peer", nil, mode)
   866  	}()
   867  	// Iteratively take some blocks, always checking the retrieval count
   868  	for {
   869  		// Check the retrieval count synchronously (the reason for this ugly block)
   870  		tester.lock.RLock()
   871  		retrieved := len(tester.ownBlocks)
   872  		tester.lock.RUnlock()
   873  		if retrieved >= targetBlocks+1 {
   874  			break
   875  		}
   876  		// Wait a bit for sync to throttle itself
   877  		var cached, frozen int
   878  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   879  			time.Sleep(25 * time.Millisecond)
   880  
   881  			tester.lock.Lock()
   882  			{
   883  				tester.downloader.queue.resultCache.lock.Lock()
   884  				cached = tester.downloader.queue.resultCache.countCompleted()
   885  				tester.downloader.queue.resultCache.lock.Unlock()
   886  				frozen = int(atomic.LoadUint32(&blocked))
   887  				retrieved = len(tester.ownBlocks)
   888  			}
   889  			tester.lock.Unlock()
   890  
   891  			if cached == blockCacheMaxItems || retrieved+cached+frozen == targetBlocks+1 {
   892  				break
   893  			}
   894  		}
   895  		// Make sure we filled up the cache, then exhaust it
   896  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   897  
   898  		tester.lock.RLock()
   899  		retrieved = len(tester.ownBlocks)
   900  		tester.lock.RUnlock()
   901  		if cached != blockCacheMaxItems && retrieved+cached+frozen != targetBlocks+1 {
   902  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
   903  		}
   904  		// Permit the blocked blocks to import
   905  		if atomic.LoadUint32(&blocked) > 0 {
   906  			atomic.StoreUint32(&blocked, uint32(0))
   907  			proceed <- struct{}{}
   908  		}
   909  	}
   910  	// Check that we haven't pulled more blocks than available
   911  	assertOwnChain(t, tester, targetBlocks+1)
   912  	if err := <-errc; err != nil {
   913  		t.Fatalf("block synchronization failed: %v", err)
   914  	}
   915  }
   916  
   917  // Tests that simple synchronisation against a forked chain works correctly. In
   918  // this test, common ancestor lookup should *not* be short-circuited, and a full
   919  // binary search should be executed.
   920  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   921  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   922  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   923  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   924  func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, 65, FullSync) }
   925  func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, 65, FastSync) }
   926  
   927  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   928  	t.Parallel()
   929  
   930  	tester := newTester()
   931  	defer tester.terminate()
   932  
   933  	// Create a long enough forked chain
   934  	common, fork := MaxHashFetch, 2*MaxHashFetch
   935  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, stakingInfosA, stakingInfosB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   936  
   937  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA, stakingInfosA)
   938  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB, stakingInfosB)
   939  
   940  	// Synchronise with the peer and make sure all blocks were retrieved
   941  	if err := tester.sync("fork A", nil, mode); err != nil {
   942  		t.Fatalf("failed to synchronise blocks: %v", err)
   943  	}
   944  	assertOwnChain(t, tester, common+fork+1)
   945  
   946  	// Synchronise with the second peer and make sure that fork is pulled too
   947  	if err := tester.sync("fork B", nil, mode); err != nil {
   948  		t.Fatalf("failed to synchronise blocks: %v", err)
   949  	}
   950  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   951  }
   952  
   953  // Tests that synchronising against a much shorter but much heavier fork works
   954  // correctly and is not dropped.
   955  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   956  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   957  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   958  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   959  func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, 65, FullSync) }
   960  func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, 65, FastSync) }
   961  
   962  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   963  	t.Parallel()
   964  
   965  	tester := newTester()
   966  	defer tester.terminate()
   967  
   968  	// Create a long enough forked chain
   969  	common, fork := MaxHashFetch, 4*MaxHashFetch
   970  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, stakingInfoA, stakingInfoB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   971  
   972  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA, stakingInfoA)
   973  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB, stakingInfoB)
   974  
   975  	// Synchronise with the peer and make sure all blocks were retrieved
   976  	if err := tester.sync("light", nil, mode); err != nil {
   977  		t.Fatalf("failed to synchronise blocks: %v", err)
   978  	}
   979  	assertOwnChain(t, tester, common+fork+1)
   980  
   981  	// Synchronise with the second peer and make sure that fork is pulled too
   982  	if err := tester.sync("heavy", nil, mode); err != nil {
   983  		t.Fatalf("failed to synchronise blocks: %v", err)
   984  	}
   985  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   986  }
   987  
   988  // Tests that chain forks are contained within a certain interval of the current
   989  // chain head, ensuring that malicious peers cannot waste resources by feeding
   990  // long dead chains.
   991  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   992  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   993  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   994  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   995  func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, 65, FullSync) }
   996  func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, 65, FastSync) }
   997  
   998  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   999  	t.Parallel()
  1000  
  1001  	tester := newTester()
  1002  	defer tester.terminate()
  1003  
  1004  	// Create a long enough forked chain
  1005  	common, fork := 13, int(MaxForkAncestry+17)
  1006  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, stakingInfoA, stakingInfoB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1007  
  1008  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA, stakingInfoA)
  1009  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB, stakingInfoB)
  1010  
  1011  	// Synchronise with the peer and make sure all blocks were retrieved
  1012  	if err := tester.sync("original", nil, mode); err != nil {
  1013  		t.Fatalf("failed to synchronise blocks: %v", err)
  1014  	}
  1015  	assertOwnChain(t, tester, common+fork+1)
  1016  
  1017  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
  1018  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
  1019  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
  1020  	}
  1021  }
  1022  
  1023  // Tests that chain forks are contained within a certain interval of the current
  1024  // chain head for short but heavy forks too. These are a bit special because they
  1025  // take different ancestor lookup paths.
  1026  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
  1027  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
  1028  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
  1029  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
  1030  func TestBoundedHeavyForkedSync65Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FullSync) }
  1031  func TestBoundedHeavyForkedSync65Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FastSync) }
  1032  
  1033  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
  1034  	t.Parallel()
  1035  
  1036  	tester := newTester()
  1037  	defer tester.terminate()
  1038  
  1039  	// Create a long enough forked chain
  1040  	common, fork := 13, int(MaxForkAncestry+17)
  1041  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, stakingInfoA, stakingInfoB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
  1042  
  1043  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA, stakingInfoA)
  1044  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB, stakingInfoB) // Root the fork below the ancestor limit
  1045  
  1046  	// Synchronise with the peer and make sure all blocks were retrieved
  1047  	if err := tester.sync("original", nil, mode); err != nil {
  1048  		t.Fatalf("failed to synchronise blocks: %v", err)
  1049  	}
  1050  	assertOwnChain(t, tester, common+fork+1)
  1051  
  1052  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
  1053  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
  1054  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
  1055  	}
  1056  }
  1057  
  1058  // Tests that an inactive downloader will not accept incoming block headers and
  1059  // bodies.
  1060  func TestInactiveDownloader62(t *testing.T) {
  1061  	t.Parallel()
  1062  
  1063  	tester := newTester()
  1064  	defer tester.terminate()
  1065  
  1066  	// Check that neither block headers nor bodies are accepted
  1067  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
  1068  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  1069  	}
  1070  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
  1071  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  1072  	}
  1073  }
  1074  
  1075  // Tests that an inactive downloader will not accept incoming block headers,
  1076  // bodies and receipts.
  1077  func TestInactiveDownloader63(t *testing.T) {
  1078  	t.Parallel()
  1079  
  1080  	tester := newTester()
  1081  	defer tester.terminate()
  1082  
  1083  	// Check that neither block headers, bodies nor receipts are accepted
  1084  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
  1085  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  1086  	}
  1087  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
  1088  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  1089  	}
  1090  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
  1091  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  1092  	}
  1093  }
  1094  
  1095  // Tests that a canceled download wipes all previously accumulated state.
  1096  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
  1097  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
  1098  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
  1099  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
  1100  func TestCancel65Full(t *testing.T)  { testCancel(t, 65, FullSync) }
  1101  func TestCancel65Fast(t *testing.T)  { testCancel(t, 65, FastSync) }
  1102  
  1103  func testCancel(t *testing.T, protocol int, mode SyncMode) {
  1104  	t.Parallel()
  1105  
  1106  	tester := newTester()
  1107  	defer tester.terminate()
  1108  
  1109  	// Create a small enough block chain to download
  1110  	targetBlocks := blockCacheMaxItems - 15
  1111  	if targetBlocks >= MaxHashFetch {
  1112  		targetBlocks = MaxHashFetch - 15
  1113  	}
  1114  	if targetBlocks >= MaxHeaderFetch {
  1115  		targetBlocks = MaxHeaderFetch - 15
  1116  	}
  1117  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1118  
  1119  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1120  
  1121  	// Make sure canceling works with a pristine downloader
  1122  	tester.downloader.Cancel()
  1123  	if !tester.downloader.queue.Idle() {
  1124  		t.Errorf("download queue not idle")
  1125  	}
  1126  	// Synchronise with the peer, but cancel afterwards
  1127  	if err := tester.sync("peer", nil, mode); err != nil {
  1128  		t.Fatalf("failed to synchronise blocks: %v", err)
  1129  	}
  1130  	tester.downloader.Cancel()
  1131  	if !tester.downloader.queue.Idle() {
  1132  		t.Errorf("download queue not idle")
  1133  	}
  1134  }
  1135  
  1136  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
  1137  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
  1138  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
  1139  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
  1140  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
  1141  func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, 65, FullSync) }
  1142  func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, 65, FastSync) }
  1143  
  1144  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
  1145  	t.Parallel()
  1146  
  1147  	tester := newTester()
  1148  	defer tester.terminate()
  1149  
  1150  	// Create various peers with various parts of the chain
  1151  	targetPeers := 8
  1152  	targetBlocks := targetPeers*blockCacheMaxItems - 15
  1153  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1154  
  1155  	for i := 0; i < targetPeers; i++ {
  1156  		id := fmt.Sprintf("peer #%d", i)
  1157  		tester.newPeer(id, protocol, hashes[i*blockCacheMaxItems:], headers, blocks, receipts, stakingInfos)
  1158  	}
  1159  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1160  		t.Fatalf("failed to synchronise blocks: %v", err)
  1161  	}
  1162  	assertOwnChain(t, tester, targetBlocks+1)
  1163  }
  1164  
  1165  // Tests that synchronisations behave well in multi-version protocol environments
  1166  // and do not wreak havoc on other nodes in the network.
  1167  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1168  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1169  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1170  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1171  func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, 65, FullSync) }
  1172  func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, 65, FastSync) }
  1173  
  1174  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1175  	t.Parallel()
  1176  
  1177  	tester := newTester()
  1178  	defer tester.terminate()
  1179  
  1180  	// Create a small enough block chain to download
  1181  	targetBlocks := blockCacheMaxItems - 15
  1182  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1183  
  1184  	// Create peers of every type
  1185  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil, stakingInfos)
  1186  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts, stakingInfos)
  1187  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts, stakingInfos)
  1188  	tester.newPeer("peer 65", 65, hashes, headers, blocks, receipts, stakingInfos)
  1189  
  1190  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1191  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1192  		t.Fatalf("failed to synchronise blocks: %v", err)
  1193  	}
  1194  	assertOwnChain(t, tester, targetBlocks+1)
  1195  
  1196  	// Check that no peers have been dropped off
  1197  	for _, version := range []int{62, 63, 64, 65} {
  1198  		peer := fmt.Sprintf("peer %d", version)
  1199  		if _, ok := tester.peerHashes[peer]; !ok {
  1200  			t.Errorf("%s dropped", peer)
  1201  		}
  1202  	}
  1203  }
  1204  
  1205  // Tests that if a block is empty (e.g. header only), no body request should be
  1206  // made, and instead the header should be assembled into a whole block by itself.
  1207  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1208  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1209  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1210  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1211  func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, 65, FullSync) }
  1212  func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, 65, FastSync) }
  1213  
  1214  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1215  	t.Parallel()
  1216  
  1217  	tester := newTester()
  1218  	defer tester.terminate()
  1219  
  1220  	// Create a block chain to download
  1221  	targetBlocks := 2*blockCacheMaxItems - 15
  1222  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1223  
  1224  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1225  
  1226  	// Instrument the downloader to signal body requests
  1227  	bodiesHave, receiptsHave := int32(0), int32(0)
  1228  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1229  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1230  	}
  1231  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1232  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1233  	}
  1234  	// Synchronise with the peer and make sure all blocks were retrieved
  1235  	if err := tester.sync("peer", nil, mode); err != nil {
  1236  		t.Fatalf("failed to synchronise blocks: %v", err)
  1237  	}
  1238  	assertOwnChain(t, tester, targetBlocks+1)
  1239  
  1240  	// Validate the number of block bodies that should have been requested
  1241  	bodiesNeeded, receiptsNeeded := 0, 0
  1242  	for _, block := range blocks {
  1243  		if mode != LightSync && block != tester.genesis && len(block.Transactions()) > 0 {
  1244  			bodiesNeeded++
  1245  		}
  1246  	}
  1247  	for _, receipt := range receipts {
  1248  		if mode == FastSync && len(receipt) > 0 {
  1249  			receiptsNeeded++
  1250  		}
  1251  	}
  1252  	if int(bodiesHave) != bodiesNeeded {
  1253  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1254  	}
  1255  	if int(receiptsHave) != receiptsNeeded {
  1256  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1257  	}
  1258  }
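
        // The short circuit under test hinges on the header alone revealing whether a
        // body must be fetched. A minimal sketch of that decision, assuming the empty
        // transaction-trie root is exposed as types.EmptyRootHash as in upstream
        // go-ethereum (hypothetical helper, not the downloader's actual code path):
        func bodyNeeded(header *types.Header, genesisHash common.Hash) bool {
        	// Genesis never needs a body; an empty transaction trie means the header
        	// can be promoted to a complete block without any body request.
        	return header.Hash() != genesisHash && header.TxHash != types.EmptyRootHash
        }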
  1259  
  1260  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1261  // stalling the downloader by feeding gapped header chains.
  1262  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1263  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1264  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1265  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1266  func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, 65, FullSync) }
  1267  func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, 65, FastSync) }
  1268  
  1269  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1270  	t.Parallel()
  1271  
  1272  	tester := newTester()
  1273  	defer tester.terminate()
  1274  
  1275  	// Create a small enough block chain to download
  1276  	targetBlocks := blockCacheMaxItems - 15
  1277  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1278  
  1279  	// Attempt a full sync with an attacker feeding gapped headers
  1280  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1281  	missing := targetBlocks / 2
  1282  	delete(tester.peerHeaders["attack"], hashes[missing])
  1283  
  1284  	if err := tester.sync("attack", nil, mode); err == nil {
  1285  		t.Fatalf("succeeded attacker synchronisation")
  1286  	}
  1287  	// Synchronise with the valid peer and make sure sync succeeds
  1288  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1289  	if err := tester.sync("valid", nil, mode); err != nil {
  1290  		t.Fatalf("failed to synchronise blocks: %v", err)
  1291  	}
  1292  	assertOwnChain(t, tester, targetBlocks+1)
  1293  }
  1294  
  1295  // Tests that if requested headers are shifted (i.e. the first one is missing),
  1296  // the queue detects the invalid numbering.
  1297  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1298  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1299  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1300  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1301  func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, 65, FullSync) }
  1302  func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, 65, FastSync) }
  1303  
  1304  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1305  	t.Parallel()
  1306  
  1307  	tester := newTester()
  1308  	defer tester.terminate()
  1309  
  1310  	// Create a small enough block chain to download
  1311  	targetBlocks := blockCacheMaxItems - 15
  1312  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1313  
  1314  	// Attempt a full sync with an attacker feeding shifted headers
  1315  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1316  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1317  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1318  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1319  
  1320  	if err := tester.sync("attack", nil, mode); err == nil {
  1321  		t.Fatalf("succeeded attacker synchronisation")
  1322  	}
  1323  	// Synchronise with the valid peer and make sure sync succeeds
  1324  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1325  	if err := tester.sync("valid", nil, mode); err != nil {
  1326  		t.Fatalf("failed to synchronise blocks: %v", err)
  1327  	}
  1328  	assertOwnChain(t, tester, targetBlocks+1)
  1329  }
  1330  
  1331  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1332  // for various failure scenarios. Afterwards a full sync is attempted to make
  1333  // sure no state was corrupted.
  1334  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1335  func TestInvalidHeaderRollback65Fast(t *testing.T)  { testInvalidHeaderRollback(t, 65, FastSync) }
  1336  
  1337  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1338  	t.Parallel()
  1339  
  1340  	tester := newTester()
  1341  	defer tester.terminate()
  1342  
  1343  	// Create a small enough block chain to download
  1344  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1345  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1346  
  1347  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1348  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1349  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1350  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1351  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1352  
  1353  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1354  		t.Fatalf("succeeded fast attacker synchronisation")
  1355  	}
  1356  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1357  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1358  	}
  1359  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1360  	// This should result in the last fsHeaderSafetyNet headers being rolled
  1361  	// back, and the pivot point being reverted to a non-block status.
  1362  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1363  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1364  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in the gap
  1365  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1366  
  1367  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1368  		t.Fatalf("succeeded block attacker synchronisation")
  1369  	}
  1370  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1371  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1372  	}
  1373  	if mode == FastSync {
  1374  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1375  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1376  		}
  1377  	}
  1378  	// Attempt to sync with an attacker that withholds promised blocks after the
  1379  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1380  	// but already imported pivot block.
  1381  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1382  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1383  
  1384  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1385  		for i := missing; i <= len(hashes); i++ {
  1386  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1387  		}
  1388  		tester.downloader.syncInitHook = nil
  1389  	}
  1390  
  1391  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1392  		t.Fatalf("succeeded withholding attacker synchronisation")
  1393  	}
  1394  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1395  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1396  	}
  1397  	if mode == FastSync {
  1398  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1399  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1400  		}
  1401  	}
  1402  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1403  	// rollback should also disable fast syncing for this process, verify that we
  1404  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1405  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1406  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1407  	if err := tester.sync("valid", nil, mode); err != nil {
  1408  		t.Fatalf("failed to synchronise blocks: %v", err)
  1409  	}
  1410  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1411  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1412  	}
  1413  	if mode != LightSync {
  1414  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1415  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1416  		}
  1417  	}
  1418  }
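
        // The head bounds asserted above follow from arithmetic over the package
        // constants: each detected attack unwinds fsHeaderSafetyNet headers, while up
        // to MaxHeaderFetch headers may already have been accepted past the last
        // verified batch. A hedged reading of that bound (illustrative helper only):
        func maxHeadAfterRollback(safetyNetsSurvived int) int {
        	return safetyNetsSurvived*fsHeaderSafetyNet + MaxHeaderFetch
        }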
  1419  
  1420  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1421  // afterwards by not sending any useful hashes.
  1422  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1423  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1424  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1425  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1426  func TestHighTDStarvationAttack65Full(t *testing.T)  { testHighTDStarvationAttack(t, 65, FullSync) }
  1427  func TestHighTDStarvationAttack65Fast(t *testing.T)  { testHighTDStarvationAttack(t, 65, FastSync) }
  1428  
  1429  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1430  	t.Parallel()
  1431  
  1432  	tester := newTester()
  1433  	defer tester.terminate()
  1434  
  1435  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(0, 0, tester.genesis, nil, false)
  1436  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts, stakingInfos)
  1437  
  1438  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1439  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1440  	}
  1441  }
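
        // The starvation attack works because the advertised TD (1,000,000) can never
        // be backed by the single genesis hash the attacker serves, so header retrieval
        // stops making progress and the sync fails with errStallingPeer. A hedged
        // sketch of that predicate (hypothetical helper, not the real stall detector):
        func looksStalled(advertisedTD, deliveredTD *big.Int) bool {
        	// A peer that promised more total difficulty than it delivers is starving us.
        	return advertisedTD.Cmp(deliveredTD) > 0
        }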
  1442  
  1443  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1444  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1445  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1446  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1447  func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }
  1448  
  1449  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1450  	t.Parallel()
  1451  
  1452  	// Define the disconnection requirement for individual hash fetch errors
  1453  	tests := []struct {
  1454  		result error
  1455  		drop   bool
  1456  	}{
  1457  		{nil, false},                        // Sync succeeded, all is well
  1458  		{errBusy, false},                    // Sync is already in progress, no problem
  1459  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1460  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1461  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1462  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1463  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1464  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1465  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1466  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1467  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1468  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1469  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1470  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1471  	}
  1472  	// Run the tests and check disconnection status
  1473  	tester := newTester()
  1474  	defer tester.terminate()
  1475  
  1476  	for i, tt := range tests {
  1477  		// Register a new peer and ensure its presence
  1478  		id := fmt.Sprintf("test %d", i)
  1479  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil, nil); err != nil {
  1480  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1481  		}
  1482  		if _, ok := tester.peerHashes[id]; !ok {
  1483  			t.Fatalf("test %d: registered peer not found", i)
  1484  		}
  1485  		// Simulate a synchronisation and check the required result
  1486  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1487  
  1488  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1489  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1490  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1491  		}
  1492  	}
  1493  }
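
        // Collapsed into a predicate, the table above says a peer is dropped exactly
        // when the error implicates the sync origin itself. A hedged sketch mirroring
        // those expectations (not the downloader's actual drop logic):
        func shouldDropPeer(result error) bool {
        	switch result {
        	case errBadPeer, errStallingPeer, errTimeout, errEmptyHeaderSet,
        		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
        		return true
        	default:
        		// nil, errBusy, errUnknownPeer, errNoPeers, errInvalidBody,
        		// errInvalidReceipt and errCancelContentProcessing leave the peer alone.
        		return false
        	}
        }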
  1494  
  1495  // Tests that synchronisation progress (origin block number, current block number
  1496  // and highest block number) is tracked and updated correctly.
  1497  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1498  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1499  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1500  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1501  func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, 65, FullSync) }
  1502  func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, 65, FastSync) }
  1503  
  1504  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1505  	t.Parallel()
  1506  
  1507  	tester := newTester()
  1508  	defer tester.terminate()
  1509  
  1510  	// Create a small enough block chain to download
  1511  	targetBlocks := blockCacheMaxItems - 15
  1512  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1513  
  1514  	// Set a sync init hook to catch progress changes
  1515  	starting := make(chan struct{})
  1516  	progress := make(chan struct{})
  1517  
  1518  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1519  		starting <- struct{}{}
  1520  		<-progress
  1521  	}
  1522  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1523  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1524  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1525  	}
  1526  	// Synchronise half the blocks and check initial progress
  1527  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts, stakingInfos)
  1528  	pending := new(sync.WaitGroup)
  1529  	pending.Add(1)
  1530  
  1531  	go func() {
  1532  		defer pending.Done()
  1533  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1534  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1535  		}
  1536  	}()
  1537  	<-starting
  1538  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1539  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1540  	}
  1541  	progress <- struct{}{}
  1542  	pending.Wait()
  1543  
  1544  	// Synchronise all the blocks and check continuation progress
  1545  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1546  	pending.Add(1)
  1547  
  1548  	go func() {
  1549  		defer pending.Done()
  1550  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1551  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1552  		}
  1553  	}()
  1554  	<-starting
  1555  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1556  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1557  	}
  1558  	progress <- struct{}{}
  1559  	pending.Wait()
  1560  
  1561  	// Check final progress after successful sync
  1562  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1563  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1564  	}
  1565  }
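
        // Outside the tests, the same Progress values can be polled to render a sync
        // status line. A minimal usage sketch against the API exercised above:
        func formatSyncStatus(d *Downloader) string {
        	p := d.Progress()
        	return fmt.Sprintf("synced %d/%d blocks (session started at #%d)",
        		p.CurrentBlock, p.HighestBlock, p.StartingBlock)
        }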
  1566  
  1567  // Tests that synchronisation progress (origin block number and highest block
  1568  // number) is tracked and updated correctly in case of a fork (or manual head
  1569  // reversion).
  1570  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1571  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1572  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1573  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1574  func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, 65, FullSync) }
  1575  func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, 65, FastSync) }
  1576  
  1577  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1578  	t.Parallel()
  1579  
  1580  	tester := newTester()
  1581  	defer tester.terminate()
  1582  
  1583  	// Create a forked chain to simulate origin revertal
  1584  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1585  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, stakingInfosA, stakingInfosB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1586  
  1587  	// Set a sync init hook to catch progress changes
  1588  	starting := make(chan struct{})
  1589  	progress := make(chan struct{})
  1590  
  1591  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1592  		starting <- struct{}{}
  1593  		<-progress
  1594  	}
  1595  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1596  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1597  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1598  	}
  1599  	// Synchronise with one of the forks and check progress
  1600  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA, stakingInfosA)
  1601  	pending := new(sync.WaitGroup)
  1602  	pending.Add(1)
  1603  
  1604  	go func() {
  1605  		defer pending.Done()
  1606  		if err := tester.sync("fork A", nil, mode); err != nil {
  1607  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1608  		}
  1609  	}()
  1610  	<-starting
  1611  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1612  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1613  	}
  1614  	progress <- struct{}{}
  1615  	pending.Wait()
  1616  
  1617  	// Simulate a successful sync above the fork
  1618  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1619  
  1620  	// Synchronise with the second fork and check progress resets
  1621  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB, stakingInfosB)
  1622  	pending.Add(1)
  1623  
  1624  	go func() {
  1625  		defer pending.Done()
  1626  		if err := tester.sync("fork B", nil, mode); err != nil {
  1627  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1628  		}
  1629  	}()
  1630  	<-starting
  1631  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1632  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1633  	}
  1634  	progress <- struct{}{}
  1635  	pending.Wait()
  1636  
  1637  	// Check final progress after successful sync
  1638  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1639  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1640  	}
  1641  }
  1642  
  1643  // Tests that if synchronisation is aborted due to some failure, then the progress
  1644  // origin is not updated in the next sync cycle, as it should be considered the
  1645  // continuation of the previous sync and not a new instance.
  1646  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1647  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1648  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1649  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1650  func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, 65, FullSync) }
  1651  func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, 65, FastSync) }
  1652  
  1653  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1654  	t.Parallel()
  1655  
  1656  	tester := newTester()
  1657  	defer tester.terminate()
  1658  
  1659  	// Create a small enough block chain to download
  1660  	targetBlocks := blockCacheMaxItems - 15
  1661  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1662  
  1663  	// Set a sync init hook to catch progress changes
  1664  	starting := make(chan struct{})
  1665  	progress := make(chan struct{})
  1666  
  1667  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1668  		starting <- struct{}{}
  1669  		<-progress
  1670  	}
  1671  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1672  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1673  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1674  	}
  1675  	// Attempt a full sync with a faulty peer
  1676  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1677  	missing := targetBlocks / 2
  1678  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1679  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1680  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1681  
  1682  	pending := new(sync.WaitGroup)
  1683  	pending.Add(1)
  1684  
  1685  	go func() {
  1686  		defer pending.Done()
  1687  		if err := tester.sync("faulty", nil, mode); err == nil {
  1688  			panic("succeeded faulty synchronisation")
  1689  		}
  1690  	}()
  1691  	<-starting
  1692  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1693  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1694  	}
  1695  	progress <- struct{}{}
  1696  	pending.Wait()
  1697  
  1698  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1699  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1700  	pending.Add(1)
  1701  
  1702  	go func() {
  1703  		defer pending.Done()
  1704  		if err := tester.sync("valid", nil, mode); err != nil {
  1705  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1706  		}
  1707  	}()
  1708  	<-starting
  1709  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1710  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1711  	}
  1712  	progress <- struct{}{}
  1713  	pending.Wait()
  1714  
  1715  	// Check final progress after successful sync
  1716  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1717  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1718  	}
  1719  }
  1720  
  1721  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1722  // the progress height is successfully reduced at the next sync invocation.
  1723  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1724  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1725  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1726  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1727  func TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, 65, FullSync) }
  1728  func TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, 65, FastSync) }
  1729  
  1730  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1731  	t.Parallel()
  1732  
  1733  	tester := newTester()
  1734  	defer tester.terminate()
  1735  
  1736  	// Create a small block chain
  1737  	targetBlocks := blockCacheMaxItems - 15
  1738  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1739  
  1740  	// Set a sync init hook to catch progress changes
  1741  	starting := make(chan struct{})
  1742  	progress := make(chan struct{})
  1743  
  1744  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1745  		starting <- struct{}{}
  1746  		<-progress
  1747  	}
  1748  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1749  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1750  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1751  	}
  1752  	// Create and sync with an attacker that promises a higher chain than it can deliver
  1753  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1754  	for i := 1; i < 3; i++ {
  1755  		delete(tester.peerHeaders["attack"], hashes[i])
  1756  		delete(tester.peerBlocks["attack"], hashes[i])
  1757  		delete(tester.peerReceipts["attack"], hashes[i])
  1758  	}
  1759  
  1760  	pending := new(sync.WaitGroup)
  1761  	pending.Add(1)
  1762  
  1763  	go func() {
  1764  		defer pending.Done()
  1765  		if err := tester.sync("attack", nil, mode); err == nil {
  1766  			panic("succeeded attacker synchronisation")
  1767  		}
  1768  	}()
  1769  	<-starting
  1770  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1771  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1772  	}
  1773  	progress <- struct{}{}
  1774  	pending.Wait()
  1775  
  1776  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1777  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts, stakingInfos)
  1778  	pending.Add(1)
  1779  
  1780  	go func() {
  1781  		defer pending.Done()
  1782  		if err := tester.sync("valid", nil, mode); err != nil {
  1783  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1784  		}
  1785  	}()
  1786  	<-starting
  1787  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1788  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1789  	}
  1790  	progress <- struct{}{}
  1791  	pending.Wait()
  1792  
  1793  	// Check final progress after successful sync
  1794  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1795  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1796  	}
  1797  }
  1798  
  1799  // TODO-Klaytn-Issue833 Disabled because this test fails intermittently in CI
  1800  /*
  1801  // This test reproduces an issue where unexpected deliveries would
  1802  // block indefinitely if they arrived at the right time.
  1803  // We use data-driven subtests to manage this so that it will be parallel on its own
  1804  // and not with the other tests, avoiding intermittent failures.
  1805  func TestDeliverHeadersHang(t *testing.T) {
  1806  	testCases := []struct {
  1807  		protocol int
  1808  		syncMode SyncMode
  1809  	}{
  1810  		{62, FullSync},
  1811  		{63, FullSync},
  1812  		{63, FastSync},
  1813  		{64, FullSync},
  1814  		{64, FastSync},
  1815  		{64, LightSync},
  1816  	}
  1817  	for _, tc := range testCases {
  1818  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1819  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1820  		})
  1821  	}
  1822  }
  1823  */
  1824  
  1825  type floodingTestPeer struct {
  1826  	peer   Peer
  1827  	tester *downloadTester
  1828  	pend   sync.WaitGroup
  1829  }
  1830  
  1831  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1832  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1833  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1834  }
  1835  
  1836  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1837  	return ftp.peer.RequestBodies(hashes)
  1838  }
  1839  
  1840  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1841  	return ftp.peer.RequestReceipts(hashes)
  1842  }
  1843  
  1844  func (ftp *floodingTestPeer) RequestStakingInfo(hashes []common.Hash) error {
  1845  	return ftp.peer.RequestStakingInfo(hashes)
  1846  }
  1847  
  1848  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1849  	return ftp.peer.RequestNodeData(hashes)
  1850  }
  1851  
  1852  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1853  	deliveriesDone := make(chan struct{}, 500)
  1854  	for i := 0; i < cap(deliveriesDone); i++ {
  1855  		peer := fmt.Sprintf("fake-peer%d", i)
  1856  		ftp.pend.Add(1)
  1857  
  1858  		go func() {
  1859  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1860  			deliveriesDone <- struct{}{}
  1861  			ftp.pend.Done()
  1862  		}()
  1863  	}
  1864  	// Deliver the actual requested headers.
  1865  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1866  	// None of the extra deliveries should block.
  1867  	timeout := time.After(60 * time.Second)
  1868  	for i := 0; i < cap(deliveriesDone); i++ {
  1869  		select {
  1870  		case <-deliveriesDone:
  1871  		case <-timeout:
  1872  			panic("blocked")
  1873  		}
  1874  	}
  1875  	return nil
  1876  }
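
        // The flood above is a liveness check: every unsolicited DeliverHeaders call
        // must return promptly even while a genuine request is in flight. The same
        // pattern distilled into a reusable sketch (hedged, illustrative only):
        func assertNonBlocking(deliver func(), n int, limit time.Duration) error {
        	done := make(chan struct{}, n) // buffered so senders never block on the drain
        	for i := 0; i < n; i++ {
        		go func() {
        			deliver()
        			done <- struct{}{}
        		}()
        	}
        	timeout := time.After(limit)
        	for i := 0; i < n; i++ {
        		select {
        		case <-done:
        		case <-timeout:
        			return errors.New("unsolicited delivery blocked")
        		}
        	}
        	return nil
        }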
  1877  
  1878  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1879  	t.Parallel()
  1880  
  1881  	master := newTester()
  1882  	defer master.terminate()
  1883  
  1884  	hashes, headers, blocks, receipts, stakingInfos := master.makeChain(5, 0, master.genesis, nil, false)
  1885  	for i := 0; i < 200; i++ {
  1886  		tester := newTester()
  1887  		tester.peerDb = master.peerDb
  1888  
  1889  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1890  		// Whenever the downloader requests headers, flood it with
  1891  		// a lot of unrequested header deliveries.
  1892  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1893  			peer:   tester.downloader.peers.peers["peer"].peer,
  1894  			tester: tester,
  1895  		}
  1896  		if err := tester.sync("peer", nil, mode); err != nil {
  1897  			t.Errorf("test %d: sync failed: %v", i, err)
  1898  		}
  1899  		tester.terminate()
  1900  
  1901  		// Flush all goroutines to prevent messing with subsequent tests
  1902  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1903  	}
  1904  }
  1905  
  1906  func TestStakingInfoSync(t *testing.T) { testStakingInfoSync(t, 65) }
  1907  
  1908  func testStakingInfoSync(t *testing.T, protocol int) {
  1909  	log.EnableLogForTest(log.LvlCrit, log.LvlInfo)
  1910  
  1911  	tester := newTester()
  1912  	defer tester.terminate()
  1913  
  1914  	// Create a small enough block chain to download
  1915  	targetBlocks := blockCacheMaxItems - 15
  1916  	hashes, headers, blocks, receipts, stakingInfos := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1917  
  1918  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, stakingInfos)
  1919  
  1920  	stakedBlocks := make([]uint64, 0, len(stakingInfos))
  1921  	for blockHash, stakingInfo := range stakingInfos {
  1922  		stakedBlocks = append(stakedBlocks, stakingInfo.BlockNum)
  1923  		tester.stateDb.WriteCanonicalHash(blockHash, stakingInfo.BlockNum)
  1924  	}
  1925  
  1926  	// Check that no staking information is stored in the database yet
  1927  	for _, block := range stakedBlocks {
  1928  		si, err := tester.stateDb.ReadStakingInfo(block)
  1929  		if len(si) != 0 || (err != nil && !strings.Contains(err.Error(), "data is not found with the given key")) {
  1930  			t.Errorf("staking info unexpectedly present for block %d (err: %v)", block, err)
  1931  		}
  1932  	}
  1933  
  1934  	if err := tester.downloader.SyncStakingInfo("peer", 0, uint64(targetBlocks)); err != nil {
  1935  		t.Errorf("sync staking info failed: %v", err)
  1936  	}
  1937  
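        	// Give the asynchronous staking-info delivery time to land; a polling
        	// alternative is sketched after this test.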
  1938  	time.Sleep(3 * time.Second)
  1939  
  1940  	for _, stakingInfo := range stakingInfos {
  1941  		expected, _ := json.Marshal(stakingInfo)
  1942  		actual, err := tester.stateDb.ReadStakingInfo(stakingInfo.BlockNum)
  1943  		if err != nil {
  1944  			t.Errorf("failed to read stakingInfo: %v", err)
  1945  		}
  1946  		if !bytes.Equal(expected, actual) {
  1947  			t.Errorf("staking infos are different (expected: %v, actual: %v)", string(expected), string(actual))
  1948  		}
  1949  	}
  1950  }
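
        // The fixed sleep above bakes in a timing assumption; on a slow machine the
        // asynchronous delivery may still be in flight. A hedged polling alternative
        // built only on the accessors this test already uses:
        func waitForStakingInfo(tester *downloadTester, blockNum uint64, deadline time.Duration) error {
        	timeout := time.After(deadline)
        	for {
        		// Stop as soon as the staking info for the block becomes readable.
        		if si, err := tester.stateDb.ReadStakingInfo(blockNum); err == nil && len(si) != 0 {
        			return nil
        		}
        		select {
        		case <-timeout:
        			return fmt.Errorf("staking info for block %d not synced within %v", blockNum, deadline)
        		case <-time.After(50 * time.Millisecond):
        			// Poll again.
        		}
        	}
        }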