github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/eth/downloader/downloader_test.go

     1  // Copyright 2021 The adkgo Authors
     2  // This file is part of the adkgo library (adapted for adkgo from go-ethereum v1.10.8).
     3  //
     4  // The adkgo library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The adkgo library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"strings"
    24  	"sync"
    25  	"sync/atomic"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/aidoskuneen/adk-node"
    30  	"github.com/aidoskuneen/adk-node/common"
    31  	"github.com/aidoskuneen/adk-node/core/rawdb"
    32  	"github.com/aidoskuneen/adk-node/core/state/snapshot"
    33  	"github.com/aidoskuneen/adk-node/core/types"
    34  	"github.com/aidoskuneen/adk-node/eth/protocols/eth"
    35  	"github.com/aidoskuneen/adk-node/ethdb"
    36  	"github.com/aidoskuneen/adk-node/event"
    37  	"github.com/aidoskuneen/adk-node/trie"
    38  )
    39  
    40  // Reduce some of the parameters to make the tester faster.
    41  func init() {
    42  	fullMaxForkAncestry = 10000
    43  	lightMaxForkAncestry = 10000
    44  	blockCacheMaxItems = 1024
    45  	fsHeaderContCheck = 500 * time.Millisecond
    46  }
    47  
    48  // downloadTester is a test simulator for mocking out the local block chain.
    49  type downloadTester struct {
    50  	downloader *Downloader
    51  
    52  	genesis *types.Block   // Genesis blocks used by the tester and peers
    53  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    54  	peerDb  ethdb.Database // Database of the peers containing all data
    55  	peers   map[string]*downloadTesterPeer
    56  
    57  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    58  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    59  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    60  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    61  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    62  
    63  	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
    64  	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
    65  	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
    66  	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain
    67  
    68  	lock sync.RWMutex
    69  }
    70  
    71  // newTester creates a new downloader test mocker.
    72  func newTester() *downloadTester {
    73  	tester := &downloadTester{
    74  		genesis:     testGenesis,
    75  		peerDb:      testDB,
    76  		peers:       make(map[string]*downloadTesterPeer),
    77  		ownHashes:   []common.Hash{testGenesis.Hash()},
    78  		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
    79  		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
    80  		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
    81  		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    82  
    83  		// Initialize ancient store with test genesis block
    84  		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
    85  		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
    86  		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
    87  		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    88  	}
    89  	tester.stateDb = rawdb.NewMemoryDatabase()
    90  	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
    91  
    92  	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
    93  	return tester
    94  }
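        // A minimal sketch of the tester lifecycle, mirroring the tests below (the
        // chain fixtures such as testChainBase and its shorten helper come from this
        // package's other test files; the peer id is arbitrary):
        //
        //	tester := newTester()
        //	defer tester.terminate()
        //	tester.newPeer("peer", eth.ETH66, testChainBase.shorten(100))
        //	if err := tester.sync("peer", nil, FullSync); err != nil {
        //		// handle the failed synchronisation
        //	}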
    95  
    96  // terminate aborts any operations on the embedded downloader and releases all
    97  // held resources.
    98  func (dl *downloadTester) terminate() {
    99  	dl.downloader.Terminate()
   100  }
   101  
   102  // sync starts synchronizing with a remote peer, blocking until it completes.
   103  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   104  	dl.lock.RLock()
   105  	hash := dl.peers[id].chain.headBlock().Hash()
   106  	// If no particular TD was requested, load from the peer's blockchain
   107  	if td == nil {
   108  		td = dl.peers[id].chain.td(hash)
   109  	}
   110  	dl.lock.RUnlock()
   111  
   112  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   113  	err := dl.downloader.synchronise(id, hash, td, mode)
   114  	select {
   115  	case <-dl.downloader.cancelCh:
   116  		// Ok, downloader fully cancelled after sync cycle
   117  	default:
   118  		// Downloader is still accepting packets, can block a peer up
   119  		panic("downloader active post sync cycle") // panic will be caught by tester
   120  	}
   121  	return err
   122  }
   123  
   124  // HasHeader checks if a header is present in the tester's canonical chain.
   125  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   126  	return dl.GetHeaderByHash(hash) != nil
   127  }
   128  
   129  // HasBlock checks if a block is present in the tester's canonical chain.
   130  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   131  	return dl.GetBlockByHash(hash) != nil
   132  }
   133  
   134  // HasFastBlock checks if a block's receipts are present in the tester's canonical chain.
   135  func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
   136  	dl.lock.RLock()
   137  	defer dl.lock.RUnlock()
   138  
   139  	if _, ok := dl.ancientReceipts[hash]; ok {
   140  		return true
   141  	}
   142  	_, ok := dl.ownReceipts[hash]
   143  	return ok
   144  }
   145  
   146  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   147  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   148  	dl.lock.RLock()
   149  	defer dl.lock.RUnlock()
   150  	return dl.getHeaderByHash(hash)
   151  }
   152  
   153  // getHeaderByHash returns the header if found either within ancients or own blocks.
   154  // This method assumes that the caller holds at least the read-lock (dl.lock).
   155  func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
   156  	header := dl.ancientHeaders[hash]
   157  	if header != nil {
   158  		return header
   159  	}
   160  	return dl.ownHeaders[hash]
   161  }
   162  
   163  // GetBlockByHash retrieves a block from the tester's canonical chain.
   164  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   165  	dl.lock.RLock()
   166  	defer dl.lock.RUnlock()
   167  
   168  	block := dl.ancientBlocks[hash]
   169  	if block != nil {
   170  		return block
   171  	}
   172  	return dl.ownBlocks[hash]
   173  }
   174  
   175  // CurrentHeader retrieves the current head header from the canonical chain.
   176  func (dl *downloadTester) CurrentHeader() *types.Header {
   177  	dl.lock.RLock()
   178  	defer dl.lock.RUnlock()
   179  
   180  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   181  		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
   182  			return header
   183  		}
   184  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   185  			return header
   186  		}
   187  	}
   188  	return dl.genesis.Header()
   189  }
   190  
   191  // CurrentBlock retrieves the current head block from the canonical chain.
   192  func (dl *downloadTester) CurrentBlock() *types.Block {
   193  	dl.lock.RLock()
   194  	defer dl.lock.RUnlock()
   195  
   196  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   197  		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
   198  			// Ancient blocks are considered complete, so return them
   199  			// regardless of whether their state is still present in the
   200  			// active state database
   201  			return block
   202  		}
   203  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   204  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   205  				return block
   206  			}
   207  		}
   208  	}
   209  	return dl.genesis
   210  }
   211  
   212  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   213  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   214  	dl.lock.RLock()
   215  	defer dl.lock.RUnlock()
   216  
   217  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   218  		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
   219  			return block
   220  		}
   221  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   222  			return block
   223  		}
   224  	}
   225  	return dl.genesis
   226  }
   227  
   228  // FastSyncCommitHead manually sets the head block to a given hash.
   229  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   230  	// For now only check that the state trie is correct
   231  	if block := dl.GetBlockByHash(hash); block != nil {
   232  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
   233  		return err
   234  	}
   235  	return fmt.Errorf("non existent block: %x", hash[:4])
   236  }
   237  
   238  // GetTd retrieves the block's total difficulty from the canonical chain.
   239  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   240  	dl.lock.RLock()
   241  	defer dl.lock.RUnlock()
   242  
   243  	return dl.getTd(hash)
   244  }
   245  
   246  // getTd retrieves the block's total difficulty if found either within
   247  // ancients or own blocks.
   248  // This method assumes that the caller holds at least the read-lock (dl.lock).
   249  func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
   250  	if td := dl.ancientChainTd[hash]; td != nil {
   251  		return td
   252  	}
   253  	return dl.ownChainTd[hash]
   254  }
   255  
   256  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   257  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
   258  	dl.lock.Lock()
   259  	defer dl.lock.Unlock()
   260  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   261  	if dl.getHeaderByHash(headers[0].ParentHash) == nil {
   262  		return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
   263  	}
   264  	var hashes []common.Hash
   265  	for i := 1; i < len(headers); i++ {
   266  		hash := headers[i-1].Hash()
   267  		if headers[i].ParentHash != hash {
   268  			return i, fmt.Errorf("non-contiguous import at position %d", i)
   269  		}
   270  		hashes = append(hashes, hash)
   271  	}
   272  	hashes = append(hashes, headers[len(headers)-1].Hash())
   273  	// Do a full insert if pre-checks passed
   274  	for i, header := range headers {
   275  		hash := hashes[i]
   276  		if dl.getHeaderByHash(hash) != nil {
   277  			continue
   278  		}
   279  		if dl.getHeaderByHash(header.ParentHash) == nil {
   280  			// This _should_ be impossible, due to precheck and induction
   281  			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
   282  		}
   283  		dl.ownHashes = append(dl.ownHashes, hash)
   284  		dl.ownHeaders[hash] = header
   285  
   286  		td := dl.getTd(header.ParentHash)
   287  		dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
   288  	}
   289  	return len(headers), nil
   290  }
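        // An illustrative failure case (h1 and h2 are hypothetical headers where h1's
        // parent is already known to the tester but h2 does not link back to h1,
        // i.e. h2.ParentHash != h1.Hash()): the contiguity pre-check above bails out
        // before inserting anything.
        //
        //	n, err := dl.InsertHeaderChain([]*types.Header{h1, h2}, 0)
        //	// n == 1, err: "non-contiguous import at position 1"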
   291  
   292  // InsertChain injects a new batch of blocks into the simulated chain.
   293  func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
   294  	dl.lock.Lock()
   295  	defer dl.lock.Unlock()
   296  	for i, block := range blocks {
   297  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   298  			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
   299  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   300  			return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
   301  		}
   302  		if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
   303  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   304  			dl.ownHeaders[block.Hash()] = block.Header()
   305  		}
   306  		dl.ownBlocks[block.Hash()] = block
   307  		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
   308  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   309  		td := dl.getTd(block.ParentHash())
   310  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
   311  	}
   312  	return len(blocks), nil
   313  }
   314  
   315  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   316  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
   317  	dl.lock.Lock()
   318  	defer dl.lock.Unlock()
   319  
   320  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   321  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   322  			return i, errors.New("unknown owner")
   323  		}
   324  		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
   325  			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   326  				return i, errors.New("InsertReceiptChain: unknown parent")
   327  			}
   328  		}
   329  		if blocks[i].NumberU64() <= ancientLimit {
   330  			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
   331  			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]
   332  
   333  			// Migrate from active db to ancient db
   334  			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
   335  			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
   336  			delete(dl.ownHeaders, blocks[i].Hash())
   337  			delete(dl.ownChainTd, blocks[i].Hash())
   338  		} else {
   339  			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   340  			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   341  		}
   342  	}
   343  	return len(blocks), nil
   344  }
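        // An illustrative sketch (assuming ancientLimit = 100 and two consecutive
        // blocks #100 and #101 with known parents): #100 is frozen into the
        // ancient* maps, with its header and TD migrated out of the active maps,
        // while #101 stays in the own* maps.
        //
        //	n, err := dl.InsertReceiptChain(blocks, receipts, 100)
        //	// #100 -> dl.ancientBlocks / dl.ancientReceipts
        //	// #101 -> dl.ownBlocks / dl.ownReceipts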
   345  
   346  // SetHead rewinds the local chain to a new head.
   347  func (dl *downloadTester) SetHead(head uint64) error {
   348  	dl.lock.Lock()
   349  	defer dl.lock.Unlock()
   350  
   351  	// Find the hash of the head to reset to
   352  	var hash common.Hash
   353  	for h, header := range dl.ownHeaders {
   354  		if header.Number.Uint64() == head {
   355  			hash = h
   356  		}
   357  	}
   358  	for h, header := range dl.ancientHeaders {
   359  		if header.Number.Uint64() == head {
   360  			hash = h
   361  		}
   362  	}
   363  	if hash == (common.Hash{}) {
   364  		return fmt.Errorf("unknown head to set: %d", head)
   365  	}
   366  	// Find the offset in the header chain
   367  	var offset int
   368  	for o, h := range dl.ownHashes {
   369  		if h == hash {
   370  			offset = o
   371  			break
   372  		}
   373  	}
   374  	// Remove all the hashes and associated data afterwards
   375  	for i := offset + 1; i < len(dl.ownHashes); i++ {
   376  		delete(dl.ownChainTd, dl.ownHashes[i])
   377  		delete(dl.ownHeaders, dl.ownHashes[i])
   378  		delete(dl.ownReceipts, dl.ownHashes[i])
   379  		delete(dl.ownBlocks, dl.ownHashes[i])
   380  
   381  		delete(dl.ancientChainTd, dl.ownHashes[i])
   382  		delete(dl.ancientHeaders, dl.ownHashes[i])
   383  		delete(dl.ancientReceipts, dl.ownHashes[i])
   384  		delete(dl.ancientBlocks, dl.ownHashes[i])
   385  	}
   386  	dl.ownHashes = dl.ownHashes[:offset+1]
   387  	return nil
   388  }
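        // An illustrative sketch (assuming a fully synced canonical chain of ten
        // blocks on top of genesis): rewinding to head 5 truncates the hash chain
        // and deletes all per-hash data above it.
        //
        //	_ = dl.SetHead(5)
        //	// len(dl.ownHashes) == 6 (genesis plus blocks #1..#5)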
   389  
   390  // Rollback removes some recently added elements from the chain. It is a no-op here.
   391  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   392  }
   393  
   394  // newPeer registers a new block download source into the downloader.
   395  func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
   396  	dl.lock.Lock()
   397  	defer dl.lock.Unlock()
   398  
   399  	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
   400  	dl.peers[id] = peer
   401  	return dl.downloader.RegisterPeer(id, version, peer)
   402  }
   403  
   404  // dropPeer simulates a hard peer removal from the connection pool.
   405  func (dl *downloadTester) dropPeer(id string) {
   406  	dl.lock.Lock()
   407  	defer dl.lock.Unlock()
   408  
   409  	delete(dl.peers, id)
   410  	dl.downloader.UnregisterPeer(id)
   411  }
   412  
   413  // Snapshots implements the BlockChain interface for the downloader, but is a no-op.
   414  func (dl *downloadTester) Snapshots() *snapshot.Tree {
   415  	return nil
   416  }
   417  
   418  type downloadTesterPeer struct {
   419  	dl            *downloadTester
   420  	id            string
   421  	chain         *testChain
   422  	missingStates map[common.Hash]bool // State entries that fast sync should not return
   423  }
   424  
   425  // Head returns the peer's current head block hash and its total
   426  // difficulty.
   427  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   428  	b := dlp.chain.headBlock()
   429  	return b.Hash(), dlp.chain.td(b.Hash())
   430  }
   431  
   432  // RequestHeadersByHash retrieves a batch of headers starting at a hashed
   433  // origin from the associated peer in the download tester, delivering the
   434  // result to the downloader asynchronously.
   435  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   436  	result := dlp.chain.headersByHash(origin, amount, skip, reverse)
   437  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   438  	return nil
   439  }
   440  
   441  // RequestHeadersByNumber retrieves a batch of headers starting at a numbered
   442  // origin from the associated peer in the download tester, delivering the
   443  // result to the downloader asynchronously.
   444  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   445  	result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
   446  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   447  	return nil
   448  }
   449  
   450  // RequestBodies retrieves a batch of block bodies from the associated peer
   451  // in the download tester, delivering the result to the downloader
   452  // asynchronously.
   453  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   454  	txs, uncles := dlp.chain.bodies(hashes)
   455  	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
   456  	return nil
   457  }
   458  
   459  // RequestReceipts retrieves a batch of block receipts from the associated
   460  // peer in the download tester, delivering the result to the downloader
   461  // asynchronously.
   462  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   463  	receipts := dlp.chain.receipts(hashes)
   464  	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
   465  	return nil
   466  }
   467  
   468  // RequestNodeData retrieves a batch of state trie nodes from the associated
   469  // peer in the download tester, delivering the result to the downloader
   470  // asynchronously.
   471  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   472  	dlp.dl.lock.RLock()
   473  	defer dlp.dl.lock.RUnlock()
   474  
   475  	results := make([][]byte, 0, len(hashes))
   476  	for _, hash := range hashes {
   477  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   478  			if !dlp.missingStates[hash] {
   479  				results = append(results, data)
   480  			}
   481  		}
   482  	}
   483  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   484  	return nil
   485  }
   486  
   487  // assertOwnChain checks if the local chain contains the correct number of items
   488  // of the various chain components.
   489  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   490  	// Mark this method as a helper to report errors at callsite, not in here
   491  	t.Helper()
   492  
   493  	assertOwnForkedChain(t, tester, 1, []int{length})
   494  }
   495  
   496  // assertOwnForkedChain checks if the local forked chain contains the correct
   497  // number of items of the various chain components.
   498  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   499  	// Mark this method as a helper to report errors at callsite, not in here
   500  	t.Helper()
   501  
   502  	// Initialize the counters for the first fork
   503  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
   504  
   505  	// Update the counters for each subsequent fork
   506  	for _, length := range lengths[1:] {
   507  		headers += length - common
   508  		blocks += length - common
   509  		receipts += length - common
   510  	}
   511  	if tester.downloader.getMode() == LightSync {
   512  		blocks, receipts = 1, 1
   513  	}
   514  	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
   515  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   516  	}
   517  	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
   518  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   519  	}
   520  	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
   521  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   522  	}
   523  }
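        // A worked example with hypothetical numbers: for two forks sharing a common
        // prefix of 4 blocks, common = 4 and lengths = {10, 12} yield an expectation
        // of 10 + (12 - 4) = 18 headers, blocks and receipts each. The -1 terms in
        // the checks above compensate for the genesis block, which newTester seeds
        // into both the own* and ancient* maps.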
   524  
   525  func TestCanonicalSynchronisation65Full(t *testing.T)  { testCanonSync(t, eth.ETH65, FullSync) }
   526  func TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonSync(t, eth.ETH65, FastSync) }
   527  func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, eth.ETH65, LightSync) }
   528  
   529  func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
   530  func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
   531  func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
   532  
   533  func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
   534  	t.Parallel()
   535  
   536  	tester := newTester()
   537  	defer tester.terminate()
   538  
   539  	// Create a small enough block chain to download
   540  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   541  	tester.newPeer("peer", protocol, chain)
   542  
   543  	// Synchronise with the peer and make sure all relevant data was retrieved
   544  	if err := tester.sync("peer", nil, mode); err != nil {
   545  		t.Fatalf("failed to synchronise blocks: %v", err)
   546  	}
   547  	assertOwnChain(t, tester, chain.len())
   548  }
   549  
   550  // Tests that if a large batch of blocks is being downloaded, it is throttled
   551  // until the cached blocks are retrieved.
   552  func TestThrottling65Full(t *testing.T) { testThrottling(t, eth.ETH65, FullSync) }
   553  func TestThrottling65Fast(t *testing.T) { testThrottling(t, eth.ETH65, FastSync) }
   554  
   555  func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
   556  func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }
   557  
   558  func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
   559  	t.Parallel()
   560  	tester := newTester()
   561  
   562  	// Create a long block chain to download
   563  	targetBlocks := testChainBase.len() - 1
   564  	tester.newPeer("peer", protocol, testChainBase)
   565  
   566  	// Wrap the importer to allow stepping
   567  	blocked, proceed := uint32(0), make(chan struct{})
   568  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   569  		atomic.StoreUint32(&blocked, uint32(len(results)))
   570  		<-proceed
   571  	}
   572  	// Start a synchronisation concurrently
   573  	errc := make(chan error, 1)
   574  	go func() {
   575  		errc <- tester.sync("peer", nil, mode)
   576  	}()
   577  	// Iteratively take some blocks, always checking the retrieval count
   578  	for {
   579  		// Check the retrieval count synchronously (the reason for this ugly block)
   580  		tester.lock.RLock()
   581  		retrieved := len(tester.ownBlocks)
   582  		tester.lock.RUnlock()
   583  		if retrieved >= targetBlocks+1 {
   584  			break
   585  		}
   586  		// Wait a bit for sync to throttle itself
   587  		var cached, frozen int
   588  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   589  			time.Sleep(25 * time.Millisecond)
   590  
   591  			tester.lock.Lock()
   592  			tester.downloader.queue.lock.Lock()
   593  			tester.downloader.queue.resultCache.lock.Lock()
   594  			{
   595  				cached = tester.downloader.queue.resultCache.countCompleted()
   596  				frozen = int(atomic.LoadUint32(&blocked))
   597  				retrieved = len(tester.ownBlocks)
   598  			}
   599  			tester.downloader.queue.resultCache.lock.Unlock()
   600  			tester.downloader.queue.lock.Unlock()
   601  			tester.lock.Unlock()
   602  
   603  			if cached == blockCacheMaxItems ||
   604  				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
   605  				retrieved+cached+frozen == targetBlocks+1 ||
   606  				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   607  				break
   608  			}
   609  		}
   610  		// Make sure we filled up the cache, then exhaust it
   611  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   612  		tester.lock.RLock()
   613  		retrieved = len(tester.ownBlocks)
   614  		tester.lock.RUnlock()
   615  		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   616  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
   617  		}
   618  
   619  		// Permit the blocked blocks to import
   620  		if atomic.LoadUint32(&blocked) > 0 {
   621  			atomic.StoreUint32(&blocked, uint32(0))
   622  			proceed <- struct{}{}
   623  		}
   624  	}
   625  	// Check that we haven't pulled more blocks than available
   626  	assertOwnChain(t, tester, targetBlocks+1)
   627  	if err := <-errc; err != nil {
   628  		t.Fatalf("block synchronization failed: %v", err)
   629  	}
   630  	tester.terminate()
   632  }
   633  
   634  // Tests that simple synchronization against a forked chain works correctly. In
   635  // this test common ancestor lookup should *not* be short circuited, and a full
   636  // binary search should be executed.
   637  func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, eth.ETH65, FullSync) }
   638  func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, eth.ETH65, FastSync) }
   639  func TestForkedSync65Light(t *testing.T) { testForkedSync(t, eth.ETH65, LightSync) }
   640  
   641  func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
   642  func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
   643  func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
   644  
   645  func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   646  	t.Parallel()
   647  
   648  	tester := newTester()
   649  	defer tester.terminate()
   650  
   651  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   652  	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
   653  	tester.newPeer("fork A", protocol, chainA)
   654  	tester.newPeer("fork B", protocol, chainB)
   655  	// Synchronise with the peer and make sure all blocks were retrieved
   656  	if err := tester.sync("fork A", nil, mode); err != nil {
   657  		t.Fatalf("failed to synchronise blocks: %v", err)
   658  	}
   659  	assertOwnChain(t, tester, chainA.len())
   660  
   661  	// Synchronise with the second peer and make sure that fork is pulled too
   662  	if err := tester.sync("fork B", nil, mode); err != nil {
   663  		t.Fatalf("failed to synchronise blocks: %v", err)
   664  	}
   665  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   666  }
   667  
   668  // Tests that synchronising against a much shorter but much heavier fork works
   669  // correctly and is not dropped.
   670  func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FullSync) }
   671  func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FastSync) }
   672  func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, LightSync) }
   673  
   674  func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
   675  func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
   676  func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
   677  
   678  func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   679  	t.Parallel()
   680  
   681  	tester := newTester()
   682  	defer tester.terminate()
   683  
   684  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   685  	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
   686  	tester.newPeer("light", protocol, chainA)
   687  	tester.newPeer("heavy", protocol, chainB)
   688  
   689  	// Synchronise with the peer and make sure all blocks were retrieved
   690  	if err := tester.sync("light", nil, mode); err != nil {
   691  		t.Fatalf("failed to synchronise blocks: %v", err)
   692  	}
   693  	assertOwnChain(t, tester, chainA.len())
   694  
   695  	// Synchronise with the second peer and make sure that fork is pulled too
   696  	if err := tester.sync("heavy", nil, mode); err != nil {
   697  		t.Fatalf("failed to synchronise blocks: %v", err)
   698  	}
   699  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   700  }
   701  
   702  // Tests that chain forks are contained within a certain interval of the current
   703  // chain head, ensuring that malicious peers cannot waste resources by feeding
   704  // long dead chains.
   705  func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FullSync) }
   706  func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FastSync) }
   707  func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, LightSync) }
   708  
   709  func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
   710  func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
   711  func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
   712  
   713  func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   714  	t.Parallel()
   715  
   716  	tester := newTester()
   717  	defer tester.terminate()
   718  
   719  	chainA := testChainForkLightA
   720  	chainB := testChainForkLightB
   721  	tester.newPeer("original", protocol, chainA)
   722  	tester.newPeer("rewriter", protocol, chainB)
   723  
   724  	// Synchronise with the peer and make sure all blocks were retrieved
   725  	if err := tester.sync("original", nil, mode); err != nil {
   726  		t.Fatalf("failed to synchronise blocks: %v", err)
   727  	}
   728  	assertOwnChain(t, tester, chainA.len())
   729  
   730  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   731  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   732  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   733  	}
   734  }
   735  
   736  // Tests that chain forks are contained within a certain interval of the current
   737  // chain head for short but heavy forks too. These are a bit special because they
   738  // take different ancestor lookup paths.
   739  func TestBoundedHeavyForkedSync65Full(t *testing.T) {
   740  	testBoundedHeavyForkedSync(t, eth.ETH65, FullSync)
   741  }
   742  func TestBoundedHeavyForkedSync65Fast(t *testing.T) {
   743  	testBoundedHeavyForkedSync(t, eth.ETH65, FastSync)
   744  }
   745  func TestBoundedHeavyForkedSync65Light(t *testing.T) {
   746  	testBoundedHeavyForkedSync(t, eth.ETH65, LightSync)
   747  }
   748  
   749  func TestBoundedHeavyForkedSync66Full(t *testing.T) {
   750  	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
   751  }
   752  func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
   753  	testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
   754  }
   755  func TestBoundedHeavyForkedSync66Light(t *testing.T) {
   756  	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
   757  }
   758  
   759  func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   760  	t.Parallel()
   761  	tester := newTester()
   762  
   763  	// Create a long enough forked chain
   764  	chainA := testChainForkLightA
   765  	chainB := testChainForkHeavy
   766  	tester.newPeer("original", protocol, chainA)
   767  
   768  	// Synchronise with the peer and make sure all blocks were retrieved
   769  	if err := tester.sync("original", nil, mode); err != nil {
   770  		t.Fatalf("failed to synchronise blocks: %v", err)
   771  	}
   772  	assertOwnChain(t, tester, chainA.len())
   773  
   774  	tester.newPeer("heavy-rewriter", protocol, chainB)
   775  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   776  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   777  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   778  	}
   779  	tester.terminate()
   780  }
   781  
   782  // Tests that an inactive downloader will not accept incoming block headers,
   783  // bodies and receipts.
   784  func TestInactiveDownloader(t *testing.T) {
   785  	t.Parallel()
   786  
   787  	tester := newTester()
   788  	defer tester.terminate()
   789  
   790  	// Check that neither block headers nor bodies are accepted
   791  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   792  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   793  	}
   794  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   795  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   796  	}
   797  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   798  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   799  	}
   800  }
   801  
   802  // Tests that a canceled download wipes all previously accumulated state.
   803  func TestCancel65Full(t *testing.T)  { testCancel(t, eth.ETH65, FullSync) }
   804  func TestCancel65Fast(t *testing.T)  { testCancel(t, eth.ETH65, FastSync) }
   805  func TestCancel65Light(t *testing.T) { testCancel(t, eth.ETH65, LightSync) }
   806  
   807  func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
   808  func TestCancel66Fast(t *testing.T)  { testCancel(t, eth.ETH66, FastSync) }
   809  func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
   810  
   811  func testCancel(t *testing.T, protocol uint, mode SyncMode) {
   812  	t.Parallel()
   813  
   814  	tester := newTester()
   815  	defer tester.terminate()
   816  
   817  	chain := testChainBase.shorten(MaxHeaderFetch)
   818  	tester.newPeer("peer", protocol, chain)
   819  
   820  	// Make sure canceling works with a pristine downloader
   821  	tester.downloader.Cancel()
   822  	if !tester.downloader.queue.Idle() {
   823  		t.Errorf("download queue not idle")
   824  	}
   825  	// Synchronise with the peer, but cancel afterwards
   826  	if err := tester.sync("peer", nil, mode); err != nil {
   827  		t.Fatalf("failed to synchronise blocks: %v", err)
   828  	}
   829  	tester.downloader.Cancel()
   830  	if !tester.downloader.queue.Idle() {
   831  		t.Errorf("download queue not idle")
   832  	}
   833  }
   834  
   835  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   836  func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FullSync) }
   837  func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FastSync) }
   838  func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, LightSync) }
   839  
   840  func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
   841  func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FastSync) }
   842  func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
   843  
   844  func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
   845  	t.Parallel()
   846  
   847  	tester := newTester()
   848  	defer tester.terminate()
   849  
   850  	// Create various peers with various parts of the chain
   851  	targetPeers := 8
   852  	chain := testChainBase.shorten(targetPeers * 100)
   853  
   854  	for i := 0; i < targetPeers; i++ {
   855  		id := fmt.Sprintf("peer #%d", i)
   856  		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
   857  	}
   858  	if err := tester.sync("peer #0", nil, mode); err != nil {
   859  		t.Fatalf("failed to synchronise blocks: %v", err)
   860  	}
   861  	assertOwnChain(t, tester, chain.len())
   862  }
   863  
   864  // Tests that synchronisations behave well in multi-version protocol environments
   865  // and do not wreak havoc on other nodes in the network.
   866  func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FullSync) }
   867  func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FastSync) }
   868  func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, eth.ETH65, LightSync) }
   869  
   870  func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
   871  func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
   872  func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
   873  
   874  func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
   875  	t.Parallel()
   876  
   877  	tester := newTester()
   878  	defer tester.terminate()
   879  
   880  	// Create a small enough block chain to download
   881  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   882  
   883  	// Create peers of every type
   884  	tester.newPeer("peer 65", eth.ETH65, chain)
   885  	tester.newPeer("peer 66", eth.ETH66, chain)
   886  
   887  	// Synchronise with the requested peer and make sure all blocks were retrieved
   888  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
   889  		t.Fatalf("failed to synchronise blocks: %v", err)
   890  	}
   891  	assertOwnChain(t, tester, chain.len())
   892  
   893  	// Check that no peers have been dropped off
   894  	for _, version := range []int{65, 66} {
   895  		peer := fmt.Sprintf("peer %d", version)
   896  		if _, ok := tester.peers[peer]; !ok {
   897  			t.Errorf("%s dropped", peer)
   898  		}
   899  	}
   900  }
   901  
   902  // Tests that if a block is empty (e.g. header only), no body request should be
   903  // made, and instead the header should be assembled into a whole block on its own.
   904  func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FullSync) }
   905  func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FastSync) }
   906  func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, LightSync) }
   907  
   908  func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
   909  func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
   910  func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
   911  
   912  func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
   913  	t.Parallel()
   914  
   915  	tester := newTester()
   916  	defer tester.terminate()
   917  
   918  	// Create a block chain to download
   919  	chain := testChainBase
   920  	tester.newPeer("peer", protocol, chain)
   921  
   922  	// Instrument the downloader to count body and receipt requests
   923  	bodiesHave, receiptsHave := int32(0), int32(0)
   924  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   925  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
   926  	}
   927  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   928  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
   929  	}
   930  	// Synchronise with the peer and make sure all blocks were retrieved
   931  	if err := tester.sync("peer", nil, mode); err != nil {
   932  		t.Fatalf("failed to synchronise blocks: %v", err)
   933  	}
   934  	assertOwnChain(t, tester, chain.len())
   935  
   936  	// Validate the number of block bodies that should have been requested
   937  	bodiesNeeded, receiptsNeeded := 0, 0
   938  	for _, block := range chain.blockm {
   939  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   940  			bodiesNeeded++
   941  		}
   942  	}
   943  	for _, receipt := range chain.receiptm {
   944  		if mode == FastSync && len(receipt) > 0 {
   945  			receiptsNeeded++
   946  		}
   947  	}
   948  	if int(bodiesHave) != bodiesNeeded {
   949  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
   950  	}
   951  	if int(receiptsHave) != receiptsNeeded {
   952  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
   953  	}
   954  }
   955  
   956  // Tests that headers are enqueued continuously, preventing malicious nodes from
   957  // stalling the downloader by feeding gapped header chains.
   958  func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FullSync) }
   959  func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FastSync) }
   960  func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, LightSync) }
   961  
   962  func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
   963  func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
   964  func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
   965  
   966  func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   967  	t.Parallel()
   968  
   969  	tester := newTester()
   970  	defer tester.terminate()
   971  
   972  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   973  	brokenChain := chain.shorten(chain.len())
   974  	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
   975  	tester.newPeer("attack", protocol, brokenChain)
   976  
   977  	if err := tester.sync("attack", nil, mode); err == nil {
   978  		t.Fatalf("succeeded attacker synchronisation")
   979  	}
   980  	// Synchronise with the valid peer and make sure sync succeeds
   981  	tester.newPeer("valid", protocol, chain)
   982  	if err := tester.sync("valid", nil, mode); err != nil {
   983  		t.Fatalf("failed to synchronise blocks: %v", err)
   984  	}
   985  	assertOwnChain(t, tester, chain.len())
   986  }
   987  
   988  // Tests that if requested headers are shifted (i.e. first is missing), the queue
   989  // detects the invalid numbering.
   990  func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FullSync) }
   991  func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FastSync) }
   992  func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, LightSync) }
   993  
   994  func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
   995  func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
   996  func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
   997  
   998  func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   999  	t.Parallel()
  1000  
  1001  	tester := newTester()
  1002  	defer tester.terminate()
  1003  
  1004  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1005  
  1006  	// Attempt a full sync with an attacker feeding shifted headers
  1007  	brokenChain := chain.shorten(chain.len())
  1008  	delete(brokenChain.headerm, brokenChain.chain[1])
  1009  	delete(brokenChain.blockm, brokenChain.chain[1])
  1010  	delete(brokenChain.receiptm, brokenChain.chain[1])
  1011  	tester.newPeer("attack", protocol, brokenChain)
  1012  	if err := tester.sync("attack", nil, mode); err == nil {
  1013  		t.Fatalf("succeeded attacker synchronisation")
  1014  	}
  1015  
  1016  	// Synchronise with the valid peer and make sure sync succeeds
  1017  	tester.newPeer("valid", protocol, chain)
  1018  	if err := tester.sync("valid", nil, mode); err != nil {
  1019  		t.Fatalf("failed to synchronise blocks: %v", err)
  1020  	}
  1021  	assertOwnChain(t, tester, chain.len())
  1022  }
  1023  
  1024  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1025  // for various failure scenarios. Afterwards a full sync is attempted to make
  1026  // sure no state was corrupted.
  1027  func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH65, FastSync) }
  1028  func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }
  1029  
  1030  func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
  1031  	t.Parallel()
  1032  
  1033  	tester := newTester()
  1034  
  1035  	// Create a small enough block chain to download
  1036  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1037  	chain := testChainBase.shorten(targetBlocks)
  1038  
  1039  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1040  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1041  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1042  	fastAttackChain := chain.shorten(chain.len())
  1043  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
  1044  	tester.newPeer("fast-attack", protocol, fastAttackChain)
  1045  
  1046  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1047  		t.Fatalf("succeeded fast attacker synchronisation")
  1048  	}
  1049  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1050  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1051  	}
  1052  
  1053  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1054  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1055  	// rolled back, and also the pivot point being reverted to a non-block status.
  1056  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1057  	blockAttackChain := chain.shorten(chain.len())
  1058  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
  1059  	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
  1060  	tester.newPeer("block-attack", protocol, blockAttackChain)
  1061  
  1062  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1063  		t.Fatalf("succeeded block attacker synchronisation")
  1064  	}
  1065  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1066  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1067  	}
  1068  	if mode == FastSync {
  1069  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1070  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1071  		}
  1072  	}
  1073  
  1074  	// Attempt to sync with an attacker that withholds promised blocks after the
  1075  	// fast sync pivot point. This could be a trial to leave the node with a bad
  1076  	// but already imported pivot block.
  1077  	withholdAttackChain := chain.shorten(chain.len())
  1078  	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
  1079  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1080  		for i := missing; i < withholdAttackChain.len(); i++ {
  1081  			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
  1082  		}
  1083  		tester.downloader.syncInitHook = nil
  1084  	}
  1085  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1086  		t.Fatalf("succeeded withholding attacker synchronisation")
  1087  	}
  1088  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1089  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1090  	}
  1091  	if mode == FastSync {
  1092  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1093  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1094  		}
  1095  	}
  1096  
  1097  	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
  1098  	// should also disable fast syncing for this process, verify that we did a fresh full
  1099  	// sync. Note, we can't assert anything about the receipts since we won't purge the
  1100  	// database of them, hence we can't use assertOwnChain.
  1101  	tester.newPeer("valid", protocol, chain)
  1102  	if err := tester.sync("valid", nil, mode); err != nil {
  1103  		t.Fatalf("failed to synchronise blocks: %v", err)
  1104  	}
  1105  	if hs := len(tester.ownHeaders); hs != chain.len() {
  1106  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
  1107  	}
  1108  	if mode != LightSync {
  1109  		if bs := len(tester.ownBlocks); bs != chain.len() {
  1110  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
  1111  		}
  1112  	}
  1113  	tester.terminate()
  1114  }
  1115  
  1116  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1117  // afterwards by not sending any useful hashes.
  1118  func TestHighTDStarvationAttack65Full(t *testing.T) {
  1119  	testHighTDStarvationAttack(t, eth.ETH65, FullSync)
  1120  }
  1121  func TestHighTDStarvationAttack65Fast(t *testing.T) {
  1122  	testHighTDStarvationAttack(t, eth.ETH65, FastSync)
  1123  }
  1124  func TestHighTDStarvationAttack65Light(t *testing.T) {
  1125  	testHighTDStarvationAttack(t, eth.ETH65, LightSync)
  1126  }
  1127  
  1128  func TestHighTDStarvationAttack66Full(t *testing.T) {
  1129  	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
  1130  }
  1131  func TestHighTDStarvationAttack66Fast(t *testing.T) {
  1132  	testHighTDStarvationAttack(t, eth.ETH66, FastSync)
  1133  }
  1134  func TestHighTDStarvationAttack66Light(t *testing.T) {
  1135  	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
  1136  }
  1137  
  1138  func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
  1139  	t.Parallel()
  1140  
  1141  	tester := newTester()
  1142  
  1143  	chain := testChainBase.shorten(1)
  1144  	tester.newPeer("attack", protocol, chain)
  1145  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1146  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1147  	}
  1148  	tester.terminate()
  1149  }
  1150  
  1151  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1152  func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH65) }
  1153  func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
  1154  
  1155  func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
  1156  	t.Parallel()
  1157  
  1158  	// Define the disconnection requirement for individual hash fetch errors
  1159  	tests := []struct {
  1160  		result error
  1161  		drop   bool
  1162  	}{
  1163  		{nil, false},                        // Sync succeeded, all is well
  1164  		{errBusy, false},                    // Sync is already in progress, no problem
  1165  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1166  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1167  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1168  		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
  1169  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1170  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1171  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1172  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1173  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1174  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1175  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1176  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1177  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1178  	}
  1179  	// Run the tests and check disconnection status
  1180  	tester := newTester()
  1181  	defer tester.terminate()
  1182  	chain := testChainBase.shorten(1)
  1183  
  1184  	for i, tt := range tests {
  1185  		// Register a new peer and ensure its presence
  1186  		id := fmt.Sprintf("test %d", i)
  1187  		if err := tester.newPeer(id, protocol, chain); err != nil {
  1188  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1189  		}
  1190  		if _, ok := tester.peers[id]; !ok {
  1191  			t.Fatalf("test %d: registered peer not found", i)
  1192  		}
  1193  		// Simulate a synchronisation and check the required result
  1194  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1195  
  1196  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
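        		// A missing entry in the peer set means the peer was dropped; that must
        		// match this test case's expectation.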
  1197  		if _, ok := tester.peers[id]; !ok != tt.drop {
  1198  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1199  		}
  1200  	}
  1201  }
  1202  
  1203  // Tests that synchronisation progress (origin block number, current block number
  1204  // and highest block number) is tracked and updated correctly.
  1205  func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, eth.ETH65, FullSync) }
  1206  func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, eth.ETH65, FastSync) }
  1207  func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, eth.ETH65, LightSync) }
  1208  
  1209  func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
  1210  func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, eth.ETH66, FastSync) }
  1211  func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
  1212  
  1213  func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1214  	t.Parallel()
  1215  
  1216  	tester := newTester()
  1217  	defer tester.terminate()
  1218  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1219  
  1220  	// Set a sync init hook to catch progress changes
  1221  	starting := make(chan struct{})
  1222  	progress := make(chan struct{})
  1223  
  1224  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1225  		starting <- struct{}{}
  1226  		<-progress
  1227  	}
  1228  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1229  
  1230  	// Synchronise half the blocks and check initial progress
  1231  	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
  1232  	pending := new(sync.WaitGroup)
  1233  	pending.Add(1)
  1234  
  1235  	go func() {
  1236  		defer pending.Done()
  1237  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1238  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1239  		}
  1240  	}()
  1241  	<-starting
  1242  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1243  		HighestBlock: uint64(chain.len()/2 - 1),
  1244  	})
  1245  	progress <- struct{}{}
  1246  	pending.Wait()
  1247  
  1248  	// Synchronise all the blocks and check continuation progress
  1249  	tester.newPeer("peer-full", protocol, chain)
  1250  	pending.Add(1)
  1251  	go func() {
  1252  		defer pending.Done()
  1253  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1254  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1255  		}
  1256  	}()
  1257  	<-starting
  1258  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1259  		StartingBlock: uint64(chain.len()/2 - 1),
  1260  		CurrentBlock:  uint64(chain.len()/2 - 1),
  1261  		HighestBlock:  uint64(chain.len() - 1),
  1262  	})
  1263  
  1264  	// Check final progress after successful sync
  1265  	progress <- struct{}{}
  1266  	pending.Wait()
  1267  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1268  		StartingBlock: uint64(chain.len()/2 - 1),
  1269  		CurrentBlock:  uint64(chain.len() - 1),
  1270  		HighestBlock:  uint64(chain.len() - 1),
  1271  	})
  1272  }
  1273  
  1274  func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
  1275  	// Mark this method as a helper to report errors at callsite, not in here
  1276  	t.Helper()
  1277  
  1278  	p := d.Progress()
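        	// Zero out the state-sync counters on both sides: they fluctuate during fast
        	// sync and are irrelevant to the block progress being checked here.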
  1279  	p.KnownStates, p.PulledStates = 0, 0
  1280  	want.KnownStates, want.PulledStates = 0, 0
  1281  	if p != want {
  1282  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1283  	}
  1284  }
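
        // pollProgress is a hypothetical helper (a minimal sketch, not used by any test
        // in this file) illustrating how Downloader.Progress could be sampled while a
        // sync is in flight, much like checkProgress does above; the polling interval
        // and output format are illustrative assumptions only.
        func pollProgress(d *Downloader, every time.Duration, done <-chan struct{}) {
        	ticker := time.NewTicker(every)
        	defer ticker.Stop()
        	for {
        		select {
        		case <-ticker.C:
        			// Progress is read concurrently with in-flight syncs elsewhere in these
        			// tests, so sampling it from a ticker loop is safe.
        			p := d.Progress()
        			fmt.Printf("synced %d/%d (started at %d)\n", p.CurrentBlock, p.HighestBlock, p.StartingBlock)
        		case <-done:
        			return
        		}
        	}
        }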
  1285  
  1286  // Tests that synchronisation progress (origin block number and highest block
  1287  // number) is tracked and updated correctly in case of a fork (or manual head
  1288  // reversion).
  1289  func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FullSync) }
  1290  func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FastSync) }
  1291  func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, LightSync) }
  1292  
  1293  func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
  1294  func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
  1295  func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
  1296  
  1297  func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1298  	t.Parallel()
  1299  
  1300  	tester := newTester()
  1301  	defer tester.terminate()
  1302  	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
  1303  	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)
  1304  
  1305  	// Set a sync init hook to catch progress changes
  1306  	starting := make(chan struct{})
  1307  	progress := make(chan struct{})
  1308  
  1309  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1310  		starting <- struct{}{}
  1311  		<-progress
  1312  	}
  1313  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1314  
  1315  	// Synchronise with one of the forks and check progress
  1316  	tester.newPeer("fork A", protocol, chainA)
  1317  	pending := new(sync.WaitGroup)
  1318  	pending.Add(1)
  1319  	go func() {
  1320  		defer pending.Done()
  1321  		if err := tester.sync("fork A", nil, mode); err != nil {
  1322  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1323  		}
  1324  	}()
  1325  	<-starting
  1326  
  1327  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1328  		HighestBlock: uint64(chainA.len() - 1),
  1329  	})
  1330  	progress <- struct{}{}
  1331  	pending.Wait()
  1332  
  1333  	// Simulate a successful sync above the fork
  1334  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1335  
  1336  	// Synchronise with the second fork and check progress resets
  1337  	tester.newPeer("fork B", protocol, chainB)
  1338  	pending.Add(1)
  1339  	go func() {
  1340  		defer pending.Done()
  1341  		if err := tester.sync("fork B", nil, mode); err != nil {
  1342  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1343  		}
  1344  	}()
  1345  	<-starting
  1346  	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
  1347  		StartingBlock: uint64(testChainBase.len()) - 1,
  1348  		CurrentBlock:  uint64(chainA.len() - 1),
  1349  		HighestBlock:  uint64(chainB.len() - 1),
  1350  	})
  1351  
  1352  	// Check final progress after successful sync
  1353  	progress <- struct{}{}
  1354  	pending.Wait()
  1355  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1356  		StartingBlock: uint64(testChainBase.len()) - 1,
  1357  		CurrentBlock:  uint64(chainB.len() - 1),
  1358  		HighestBlock:  uint64(chainB.len() - 1),
  1359  	})
  1360  }
  1361  
  1362  // Tests that if synchronisation is aborted due to some failure, then the progress
  1363  // origin is not updated in the next sync cycle, as it should be considered the
  1364  // continuation of the previous sync and not a new instance.
  1365  func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FullSync) }
  1366  func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FastSync) }
  1367  func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, LightSync) }
  1368  
  1369  func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
  1370  func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
  1371  func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
  1372  
  1373  func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1374  	t.Parallel()
  1375  
  1376  	tester := newTester()
  1377  	defer tester.terminate()
  1378  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1379  
  1380  	// Set a sync init hook to catch progress changes
  1381  	starting := make(chan struct{})
  1382  	progress := make(chan struct{})
  1383  
  1384  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1385  		starting <- struct{}{}
  1386  		<-progress
  1387  	}
  1388  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1389  
  1390  	// Attempt a full sync with a faulty peer
  1391  	brokenChain := chain.shorten(chain.len())
  1392  	missing := brokenChain.len() / 2
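        	// Remove the middle block's header, body and receipts so the faulty peer
        	// stalls halfway through and the sync aborts with partial progress.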
  1393  	delete(brokenChain.headerm, brokenChain.chain[missing])
  1394  	delete(brokenChain.blockm, brokenChain.chain[missing])
  1395  	delete(brokenChain.receiptm, brokenChain.chain[missing])
  1396  	tester.newPeer("faulty", protocol, brokenChain)
  1397  
  1398  	pending := new(sync.WaitGroup)
  1399  	pending.Add(1)
  1400  	go func() {
  1401  		defer pending.Done()
  1402  		if err := tester.sync("faulty", nil, mode); err == nil {
  1403  			panic("succeeded faulty synchronisation")
  1404  		}
  1405  	}()
  1406  	<-starting
  1407  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1408  		HighestBlock: uint64(brokenChain.len() - 1),
  1409  	})
  1410  	progress <- struct{}{}
  1411  	pending.Wait()
  1412  	afterFailedSync := tester.downloader.Progress()
  1413  
  1414  	// Synchronise with a good peer and check that the progress origin remains the same
  1415  	// after a failure
  1416  	tester.newPeer("valid", protocol, chain)
  1417  	pending.Add(1)
  1418  	go func() {
  1419  		defer pending.Done()
  1420  		if err := tester.sync("valid", nil, mode); err != nil {
  1421  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1422  		}
  1423  	}()
  1424  	<-starting
  1425  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1426  
  1427  	// Check final progress after successful sync
  1428  	progress <- struct{}{}
  1429  	pending.Wait()
  1430  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1431  		CurrentBlock: uint64(chain.len() - 1),
  1432  		HighestBlock: uint64(chain.len() - 1),
  1433  	})
  1434  }
  1435  
  1436  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1437  // the progress height is successfully reduced at the next sync invocation.
  1438  func TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FullSync) }
  1439  func TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FastSync) }
  1440  func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, LightSync) }
  1441  
  1442  func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
  1443  func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FastSync) }
  1444  func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
  1445  
  1446  func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1447  	t.Parallel()
  1448  
  1449  	tester := newTester()
  1450  	defer tester.terminate()
  1451  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1452  
  1453  	// Set a sync init hook to catch progress changes
  1454  	starting := make(chan struct{})
  1455  	progress := make(chan struct{})
  1456  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1457  		starting <- struct{}{}
  1458  		<-progress
  1459  	}
  1460  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1461  
  1462  	// Create and sync with an attacker that promises a higher chain than available.
  1463  	brokenChain := chain.shorten(chain.len())
  1464  	numMissing := 5
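        	// Strip a few headers just below the announced head, so the advertised height
        	// exists but the chain leading up to it can never be delivered.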
  1465  	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
  1466  		delete(brokenChain.headerm, brokenChain.chain[i])
  1467  	}
  1468  	tester.newPeer("attack", protocol, brokenChain)
  1469  
  1470  	pending := new(sync.WaitGroup)
  1471  	pending.Add(1)
  1472  	go func() {
  1473  		defer pending.Done()
  1474  		if err := tester.sync("attack", nil, mode); err == nil {
  1475  			panic("succeeded attacker synchronisation")
  1476  		}
  1477  	}()
  1478  	<-starting
  1479  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1480  		HighestBlock: uint64(brokenChain.len() - 1),
  1481  	})
  1482  	progress <- struct{}{}
  1483  	pending.Wait()
  1484  	afterFailedSync := tester.downloader.Progress()
  1485  
  1486  	// Synchronise with a good peer and check that the progress height has been reduced to
  1487  	// the true value.
  1488  	validChain := chain.shorten(chain.len() - numMissing)
  1489  	tester.newPeer("valid", protocol, validChain)
  1490  	pending.Add(1)
  1491  
  1492  	go func() {
  1493  		defer pending.Done()
  1494  		if err := tester.sync("valid", nil, mode); err != nil {
  1495  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1496  		}
  1497  	}()
  1498  	<-starting
  1499  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1500  		CurrentBlock: afterFailedSync.CurrentBlock,
  1501  		HighestBlock: uint64(validChain.len() - 1),
  1502  	})
  1503  
  1504  	// Check final progress after successful sync.
  1505  	progress <- struct{}{}
  1506  	pending.Wait()
  1507  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1508  		CurrentBlock: uint64(validChain.len() - 1),
  1509  		HighestBlock: uint64(validChain.len() - 1),
  1510  	})
  1511  }
  1512  
  1513  // This test reproduces an issue where unexpected deliveries would
  1514  // block indefinitely if they arrived at the right time.
  1515  func TestDeliverHeadersHang65Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FullSync) }
  1516  func TestDeliverHeadersHang65Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FastSync) }
  1517  func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, LightSync) }
  1518  
  1519  func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
  1520  func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
  1521  func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }
  1522  
  1523  func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
  1524  	t.Parallel()
  1525  
  1526  	master := newTester()
  1527  	defer master.terminate()
  1528  	chain := testChainBase.shorten(15)
  1529  
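        	// Run many iterations, each with a fresh tester instance, to give the hang a
        	// realistic chance to reproduce if the bug ever regresses.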
  1530  	for i := 0; i < 200; i++ {
  1531  		tester := newTester()
  1532  		tester.peerDb = master.peerDb
  1533  		tester.newPeer("peer", protocol, chain)
  1534  
  1535  		// Whenever the downloader requests headers, flood it with
  1536  		// a lot of unrequested header deliveries.
  1537  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1538  			peer:   tester.downloader.peers.peers["peer"].peer,
  1539  			tester: tester,
  1540  		}
  1541  		if err := tester.sync("peer", nil, mode); err != nil {
  1542  			t.Errorf("test %d: sync failed: %v", i, err)
  1543  		}
  1544  		tester.terminate()
  1545  	}
  1546  }
  1547  
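        // floodingTestPeer wraps a regular test peer, proxying most requests straight
        // through but intercepting header-by-number requests to first flood the
        // downloader with a burst of unsolicited header deliveries.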
  1548  type floodingTestPeer struct {
  1549  	peer   Peer
  1550  	tester *downloadTester
  1551  }
  1552  
  1553  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1554  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1555  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1556  }
  1557  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1558  	return ftp.peer.RequestBodies(hashes)
  1559  }
  1560  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1561  	return ftp.peer.RequestReceipts(hashes)
  1562  }
  1563  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1564  	return ftp.peer.RequestNodeData(hashes)
  1565  }
  1566  
  1567  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
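        	// Spawn a burst of deliveries from non-existent peers, leaving one slot in the
        	// channel for the real (requested) header delivery launched further below.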
  1568  	deliveriesDone := make(chan struct{}, 500)
  1569  	for i := 0; i < cap(deliveriesDone)-1; i++ {
  1570  		peer := fmt.Sprintf("fake-peer%d", i)
  1571  		go func() {
  1572  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1573  			deliveriesDone <- struct{}{}
  1574  		}()
  1575  	}
  1576  
  1577  	// None of the extra deliveries should block.
  1578  	timeout := time.After(60 * time.Second)
  1579  	launched := false
  1580  	for i := 0; i < cap(deliveriesDone); i++ {
  1581  		select {
  1582  		case <-deliveriesDone:
  1583  			if !launched {
  1584  				// Start delivering the requested headers
  1585  				// after one of the flooding responses has arrived.
  1586  				go func() {
  1587  					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1588  					deliveriesDone <- struct{}{}
  1589  				}()
  1590  				launched = true
  1591  			}
  1592  		case <-timeout:
  1593  			panic("blocked")
  1594  		}
  1595  	}
  1596  	return nil
  1597  }
  1598  
  1599  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1600  	testCases := []struct {
  1601  		remoteHeight uint64
  1602  		localHeight  uint64
  1603  		expected     []int
  1604  	}{
  1605  		// Remote is way higher. We should ask for the remote head and go backwards
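        		// e.g. for (1500, 1000): 12 requests spaced 16 apart, ending at the remote
        		// head minus one (1499) and therefore starting at 1323.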
  1606  		{1500, 1000,
  1607  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1608  		},
  1609  		{15000, 13006,
  1610  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1611  		},
  1612  		// Remote is pretty close to us. We don't have to fetch as many
  1613  		{1200, 1150,
  1614  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1615  		},
  1616  		// Remote is equal to us (so on a fork with higher td)
  1617  		// We should get the closest couple of ancestors
  1618  		{1500, 1500,
  1619  			[]int{1497, 1499},
  1620  		},
  1621  		// We're higher than the remote! Odd
  1622  		{1000, 1500,
  1623  			[]int{997, 999},
  1624  		},
  1625  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1626  		{0, 1500,
  1627  			[]int{0, 2},
  1628  		},
  1629  		{6000000, 0,
  1630  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1631  		},
  1632  		{0, 0,
  1633  			[]int{0, 2},
  1634  		},
  1635  	}
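        	// reqs expands a (from, count, span) triplet into the concrete header numbers
        	// that would be requested, stepping span+1 apart.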
  1636  	reqs := func(from, count, span int) []int {
  1637  		var r []int
  1638  		num := from
  1639  		for len(r) < count {
  1640  			r = append(r, num)
  1641  			num += span + 1
  1642  		}
  1643  		return r
  1644  	}
  1645  	for i, tt := range testCases {
  1646  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1647  		data := reqs(int(from), count, span)
  1648  
  1649  		if max != uint64(data[len(data)-1]) {
  1650  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1651  		}
  1652  		failed := false
  1653  		if len(data) != len(tt.expected) {
  1654  			failed = true
  1655  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1656  		} else {
  1657  			for j, n := range data {
  1658  				if n != tt.expected[j] {
  1659  					failed = true
  1660  					break
  1661  				}
  1662  			}
  1663  		}
  1664  		if failed {
  1665  			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
  1666  			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
  1667  			t.Logf("got: %v\n", res)
  1668  			t.Logf("exp: %v\n", exp)
  1669  			t.Errorf("test %d: wrong values", i)
  1670  		}
  1671  	}
  1672  }
  1673  
  1674  // Tests that peers below a pre-configured checkpoint block are prevented from
  1675  // being fast-synced from, avoiding potential cheap eclipse attacks.
  1676  func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FullSync) }
  1677  func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FastSync) }
  1678  func TestCheckpointEnforcement65Light(t *testing.T) {
  1679  	testCheckpointEnforcement(t, eth.ETH65, LightSync)
  1680  }
  1681  
  1682  func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
  1683  func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
  1684  func TestCheckpointEnforcement66Light(t *testing.T) {
  1685  	testCheckpointEnforcement(t, eth.ETH66, LightSync)
  1686  }
  1687  
  1688  func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
  1689  	t.Parallel()
  1690  
  1691  	// Create a new tester with a particular hard-coded checkpoint block
  1692  	tester := newTester()
  1693  	defer tester.terminate()
  1694  
  1695  	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
  1696  	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
  1697  
  1698  	// Attempt to sync with the peer and validate the result
  1699  	tester.newPeer("peer", protocol, chain)
  1700  
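        	// Fast and light sync must refuse a peer whose head sits below the checkpoint;
        	// full sync is unaffected and may import the whole chain.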
  1701  	var expect error
  1702  	if mode == FastSync || mode == LightSync {
  1703  		expect = errUnsyncedPeer
  1704  	}
  1705  	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
  1706  		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
  1707  	}
  1708  	if mode == FastSync || mode == LightSync {
  1709  		assertOwnChain(t, tester, 1)
  1710  	} else {
  1711  		assertOwnChain(t, tester, chain.len())
  1712  	}
  1713  }