github.com/core-coin/go-core/v2@v2.1.9/xcb/downloader/downloader_test.go

     1  // Copyright 2015 by the Authors
     2  // This file is part of the go-core library.
     3  //
     4  // The go-core library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-core library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"strings"
    24  	"sync"
    25  	"sync/atomic"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/core-coin/go-core/v2/xcbdb"
    30  
    31  	core "github.com/core-coin/go-core/v2"
    32  	"github.com/core-coin/go-core/v2/common"
    33  	"github.com/core-coin/go-core/v2/core/rawdb"
    34  	"github.com/core-coin/go-core/v2/core/types"
    35  	"github.com/core-coin/go-core/v2/event"
    36  	"github.com/core-coin/go-core/v2/trie"
    37  )
    38  
    39  // Reduce some of the parameters to make the tester faster.
    40  func init() {
    41  	fullMaxForkAncestry = 10000
    42  	lightMaxForkAncestry = 10000
    43  	blockCacheMaxItems = 1024
    44  	fsHeaderContCheck = 500 * time.Millisecond
    45  }
    46  
     47  // downloadTester is a test simulator for mocking out the local block chain.
    48  type downloadTester struct {
    49  	downloader *Downloader
    50  
     51  	genesis *types.Block   // Genesis block used by the tester and peers
    52  	stateDb xcbdb.Database // Database used by the tester for syncing from peers
    53  	peerDb  xcbdb.Database // Database of the peers containing all data
    54  	peers   map[string]*downloadTesterPeer
    55  
    56  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    57  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    58  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    59  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    60  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    61  
    62  	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
    63  	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
    64  	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
    65  	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain
    66  
    67  	lock sync.RWMutex
    68  }
    69  
    70  // newTester creates a new downloader test mocker.
    71  func newTester() *downloadTester {
    72  	tester := &downloadTester{
    73  		genesis:     testGenesis,
    74  		peerDb:      testDB,
    75  		peers:       make(map[string]*downloadTesterPeer),
    76  		ownHashes:   []common.Hash{testGenesis.Hash()},
    77  		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
    78  		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
    79  		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
    80  		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    81  
    82  		// Initialize ancient store with test genesis block
    83  		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
    84  		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
    85  		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
    86  		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    87  	}
    88  	tester.stateDb = rawdb.NewMemoryDatabase()
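         	// Mark the genesis state root as present so the tester considers the genesis state already synced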
    89  	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
    90  
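         	// Create the downloader with a zero checkpoint, using the tester itself as the
         	// blockchain to sync into and dropPeer as the peer removal callback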
    91  	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
    92  	return tester
    93  }
    94  
    95  // terminate aborts any operations on the embedded downloader and releases all
    96  // held resources.
    97  func (dl *downloadTester) terminate() {
    98  	dl.downloader.Terminate()
    99  }
   100  
   101  // sync starts synchronizing with a remote peer, blocking until it completes.
   102  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   103  	dl.lock.RLock()
   104  	hash := dl.peers[id].chain.headBlock().Hash()
   105  	// If no particular TD was requested, load from the peer's blockchain
   106  	if td == nil {
   107  		td = dl.peers[id].chain.td(hash)
   108  	}
   109  	dl.lock.RUnlock()
   110  
   111  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   112  	err := dl.downloader.synchronise(id, hash, td, mode)
   113  	select {
   114  	case <-dl.downloader.cancelCh:
   115  		// Ok, downloader fully cancelled after sync cycle
   116  	default:
    117  		// Downloader is still accepting packets, which can block up a peer
   118  		panic("downloader active post sync cycle") // panic will be caught by tester
   119  	}
   120  	return err
   121  }
   122  
    123  // HasHeader checks if a header is present in the tester's canonical chain.
   124  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   125  	return dl.GetHeaderByHash(hash) != nil
   126  }
   127  
    128  // HasBlock checks if a block is present in the tester's canonical chain.
   129  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   130  	return dl.GetBlockByHash(hash) != nil
   131  }
   132  
    133  // HasFastBlock checks if a fast-sync block is present in the tester's canonical chain.
   134  func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
   135  	dl.lock.RLock()
   136  	defer dl.lock.RUnlock()
   137  
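         	// Receipt presence (ancient or recent) is used as a proxy for the fast-sync block being available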
   138  	if _, ok := dl.ancientReceipts[hash]; ok {
   139  		return true
   140  	}
   141  	_, ok := dl.ownReceipts[hash]
   142  	return ok
   143  }
   144  
    145  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   146  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   147  	dl.lock.RLock()
   148  	defer dl.lock.RUnlock()
   149  	return dl.getHeaderByHash(hash)
   150  }
   151  
    152  // getHeaderByHash returns the header if found either within ancients or own blocks.
    153  // This method assumes that the caller holds at least the read-lock (dl.lock).
   154  func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
   155  	header := dl.ancientHeaders[hash]
   156  	if header != nil {
   157  		return header
   158  	}
   159  	return dl.ownHeaders[hash]
   160  }
   161  
    162  // GetBlockByHash retrieves a block from the tester's canonical chain.
   163  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   164  	dl.lock.RLock()
   165  	defer dl.lock.RUnlock()
   166  
   167  	block := dl.ancientBlocks[hash]
   168  	if block != nil {
   169  		return block
   170  	}
   171  	return dl.ownBlocks[hash]
   172  }
   173  
   174  // CurrentHeader retrieves the current head header from the canonical chain.
   175  func (dl *downloadTester) CurrentHeader() *types.Header {
   176  	dl.lock.RLock()
   177  	defer dl.lock.RUnlock()
   178  
   179  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   180  		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
   181  			return header
   182  		}
   183  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   184  			return header
   185  		}
   186  	}
   187  	return dl.genesis.Header()
   188  }
   189  
   190  // CurrentBlock retrieves the current head block from the canonical chain.
   191  func (dl *downloadTester) CurrentBlock() *types.Block {
   192  	dl.lock.RLock()
   193  	defer dl.lock.RUnlock()
   194  
   195  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   196  		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
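         			// Ancient blocks are returned regardless of whether their state is present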
   197  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   198  				return block
   199  			}
   200  			return block
   201  		}
   202  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   203  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   204  				return block
   205  			}
   206  		}
   207  	}
   208  	return dl.genesis
   209  }
   210  
   211  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   212  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   213  	dl.lock.RLock()
   214  	defer dl.lock.RUnlock()
   215  
   216  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   217  		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
   218  			return block
   219  		}
   220  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   221  			return block
   222  		}
   223  	}
   224  	return dl.genesis
   225  }
   226  
   227  // FastSyncCommitHead manually sets the head block to a given hash.
   228  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   229  	// For now only check that the state trie is correct
   230  	if block := dl.GetBlockByHash(hash); block != nil {
   231  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
   232  		return err
   233  	}
   234  	return fmt.Errorf("non existent block: %x", hash[:4])
   235  }
   236  
   237  // GetTd retrieves the block's total difficulty from the canonical chain.
   238  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   239  	dl.lock.RLock()
   240  	defer dl.lock.RUnlock()
   241  
   242  	return dl.getTd(hash)
   243  }
   244  
   245  // getTd retrieves the block's total difficulty if found either within
    246  // ancients or own blocks.
    247  // This method assumes that the caller holds at least the read-lock (dl.lock).
   248  func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
   249  	if td := dl.ancientChainTd[hash]; td != nil {
   250  		return td
   251  	}
   252  	return dl.ownChainTd[hash]
   253  }
   254  
   255  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   256  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
   257  	dl.lock.Lock()
   258  	defer dl.lock.Unlock()
   259  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   260  	if dl.getHeaderByHash(headers[0].ParentHash) == nil {
   261  		return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
   262  	}
   263  	var hashes []common.Hash
   264  	for i := 1; i < len(headers); i++ {
   265  		hash := headers[i-1].Hash()
   266  		if headers[i].ParentHash != headers[i-1].Hash() {
   267  			return i, fmt.Errorf("non-contiguous import at position %d", i)
   268  		}
   269  		hashes = append(hashes, hash)
   270  	}
   271  	hashes = append(hashes, headers[len(headers)-1].Hash())
   272  	// Do a full insert if pre-checks passed
   273  	for i, header := range headers {
   274  		hash := hashes[i]
   275  		if dl.getHeaderByHash(hash) != nil {
   276  			continue
   277  		}
   278  		if dl.getHeaderByHash(header.ParentHash) == nil {
   279  			// This _should_ be impossible, due to precheck and induction
   280  			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
   281  		}
   282  		dl.ownHashes = append(dl.ownHashes, hash)
   283  		dl.ownHeaders[hash] = header
   284  
   285  		td := dl.getTd(header.ParentHash)
   286  		dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
   287  	}
   288  	return len(headers), nil
   289  }
   290  
   291  // InsertChain injects a new batch of blocks into the simulated chain.
   292  func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
   293  	dl.lock.Lock()
   294  	defer dl.lock.Unlock()
   295  	for i, block := range blocks {
   296  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   297  			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
   298  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   299  			return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
   300  		}
   301  		if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
   302  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   303  			dl.ownHeaders[block.Hash()] = block.Header()
   304  		}
   305  		dl.ownBlocks[block.Hash()] = block
   306  		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
   307  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   308  		td := dl.getTd(block.ParentHash())
   309  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
   310  	}
   311  	return len(blocks), nil
   312  }
   313  
   314  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   315  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
   316  	dl.lock.Lock()
   317  	defer dl.lock.Unlock()
   318  
   319  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   320  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   321  			return i, errors.New("unknown owner")
   322  		}
   323  		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
   324  			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   325  				return i, errors.New("InsertReceiptChain: unknown parent")
   326  			}
   327  		}
   328  		if blocks[i].NumberU64() <= ancientLimit {
   329  			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
   330  			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]
   331  
   332  			// Migrate from active db to ancient db
   333  			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
   334  			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
   335  			delete(dl.ownHeaders, blocks[i].Hash())
   336  			delete(dl.ownChainTd, blocks[i].Hash())
   337  		} else {
   338  			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   339  			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   340  		}
   341  	}
   342  	return len(blocks), nil
   343  }
   344  
   345  // SetHead rewinds the local chain to a new head.
   346  func (dl *downloadTester) SetHead(head uint64) error {
   347  	dl.lock.Lock()
   348  	defer dl.lock.Unlock()
   349  
   350  	// Find the hash of the head to reset to
   351  	var hash common.Hash
   352  	for h, header := range dl.ownHeaders {
   353  		if header.Number.Uint64() == head {
   354  			hash = h
   355  		}
   356  	}
   357  	for h, header := range dl.ancientHeaders {
   358  		if header.Number.Uint64() == head {
   359  			hash = h
   360  		}
   361  	}
   362  	if hash == (common.Hash{}) {
   363  		return fmt.Errorf("unknown head to set: %d", head)
   364  	}
   365  	// Find the offset in the header chain
   366  	var offset int
   367  	for o, h := range dl.ownHashes {
   368  		if h == hash {
   369  			offset = o
   370  			break
   371  		}
   372  	}
   373  	// Remove all the hashes and associated data afterwards
   374  	for i := offset + 1; i < len(dl.ownHashes); i++ {
   375  		delete(dl.ownChainTd, dl.ownHashes[i])
   376  		delete(dl.ownHeaders, dl.ownHashes[i])
   377  		delete(dl.ownReceipts, dl.ownHashes[i])
   378  		delete(dl.ownBlocks, dl.ownHashes[i])
   379  
   380  		delete(dl.ancientChainTd, dl.ownHashes[i])
   381  		delete(dl.ancientHeaders, dl.ownHashes[i])
   382  		delete(dl.ancientReceipts, dl.ownHashes[i])
   383  		delete(dl.ancientBlocks, dl.ownHashes[i])
   384  	}
   385  	dl.ownHashes = dl.ownHashes[:offset+1]
   386  	return nil
   387  }
   388  
   389  // Rollback removes some recently added elements from the chain.
   390  func (dl *downloadTester) Rollback(hashes []common.Hash) {
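         	// The tester keeps no rollback bookkeeping, so this is intentionally a no-op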
   391  }
   392  
   393  // newPeer registers a new block download source into the downloader.
   394  func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
   395  	dl.lock.Lock()
   396  	defer dl.lock.Unlock()
   397  
   398  	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
   399  	dl.peers[id] = peer
   400  	return dl.downloader.RegisterPeer(id, version, peer)
   401  }
   402  
   403  // dropPeer simulates a hard peer removal from the connection pool.
   404  func (dl *downloadTester) dropPeer(id string) {
   405  	dl.lock.Lock()
   406  	defer dl.lock.Unlock()
   407  
   408  	delete(dl.peers, id)
   409  	dl.downloader.UnregisterPeer(id)
   410  }
   411  
   412  type downloadTesterPeer struct {
   413  	dl            *downloadTester
   414  	id            string
   415  	chain         *testChain
   416  	missingStates map[common.Hash]bool // State entries that fast sync should not return
   417  }
   418  
    419  // Head retrieves the peer's current head hash
    420  // and total difficulty.
   421  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   422  	b := dlp.chain.headBlock()
   423  	return b.Hash(), dlp.chain.td(b.Hash())
   424  }
   425  
    426  // RequestHeadersByHash simulates a GetBlockHeaders request for a hash-based
    427  // origin, associated with a particular peer in the download tester. The matching
    428  // headers are delivered to the downloader asynchronously.
   429  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   430  	result := dlp.chain.headersByHash(origin, amount, skip, reverse)
   431  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   432  	return nil
   433  }
   434  
    435  // RequestHeadersByNumber simulates a GetBlockHeaders request for a number-based
    436  // origin, associated with a particular peer in the download tester. The matching
    437  // headers are delivered to the downloader asynchronously.
   438  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   439  	result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
   440  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   441  	return nil
   442  }
   443  
    444  // RequestBodies simulates a getBlockBodies request associated with a particular
    445  // peer in the download tester. The requested block bodies are delivered to the
    446  // downloader asynchronously.
   447  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   448  	txs, uncles := dlp.chain.bodies(hashes)
   449  	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
   450  	return nil
   451  }
   452  
    453  // RequestReceipts simulates a getReceipts request associated with a particular
    454  // peer in the download tester. The requested block receipts are delivered to the
    455  // downloader asynchronously.
   456  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   457  	receipts := dlp.chain.receipts(hashes)
   458  	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
   459  	return nil
   460  }
   461  
    462  // RequestNodeData simulates a getNodeData request associated with a particular
    463  // peer in the download tester. The requested state entries are delivered to the
    464  // downloader asynchronously.
   465  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   466  	dlp.dl.lock.RLock()
   467  	defer dlp.dl.lock.RUnlock()
   468  
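         	// Serve only state entries present in the peer database and not deliberately withheld via missingStates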
   469  	results := make([][]byte, 0, len(hashes))
   470  	for _, hash := range hashes {
   471  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   472  			if !dlp.missingStates[hash] {
   473  				results = append(results, data)
   474  			}
   475  		}
   476  	}
   477  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   478  	return nil
   479  }
   480  
   481  // assertOwnChain checks if the local chain contains the correct number of items
   482  // of the various chain components.
   483  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   484  	// Mark this method as a helper to report errors at callsite, not in here
   485  	t.Helper()
   486  
   487  	assertOwnForkedChain(t, tester, 1, []int{length})
   488  }
   489  
   490  // assertOwnForkedChain checks if the local forked chain contains the correct
   491  // number of items of the various chain components.
   492  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   493  	// Mark this method as a helper to report errors at callsite, not in here
   494  	t.Helper()
   495  
   496  	// Initialize the counters for the first fork
   497  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
   498  
   499  	// Update the counters for each subsequent fork
   500  	for _, length := range lengths[1:] {
   501  		headers += length - common
   502  		blocks += length - common
   503  		receipts += length - common
   504  	}
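         	// In light sync only headers are fetched, so just the genesis block and receipt entry remain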
   505  	if tester.downloader.getMode() == LightSync {
   506  		blocks, receipts = 1, 1
   507  	}
   508  	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
   509  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   510  	}
   511  	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
   512  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   513  	}
   514  	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
   515  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   516  	}
   517  }
   518  
   519  // Tests that simple synchronization against a canonical chain works correctly.
    520  // In this test common ancestor lookup should be short-circuited and not require
   521  // binary searching.
   522  func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
   523  func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
   524  func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
   525  func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) }
   526  func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonicalSynchronisation(t, 65, FullSync) }
   527  func TestCanonicalSynchronisation65Fast(t *testing.T) { testCanonicalSynchronisation(t, 65, FastSync) }
   528  func TestCanonicalSynchronisation65Light(t *testing.T) {
   529  	testCanonicalSynchronisation(t, 65, LightSync)
   530  }
   531  
   532  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   533  	t.Parallel()
   534  
   535  	tester := newTester()
   536  	defer tester.terminate()
   537  
   538  	// Create a small enough block chain to download
   539  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   540  	tester.newPeer("peer", protocol, chain)
   541  
   542  	// Synchronise with the peer and make sure all relevant data was retrieved
   543  	if err := tester.sync("peer", nil, mode); err != nil {
   544  		t.Fatalf("failed to synchronise blocks: %v", err)
   545  	}
   546  	assertOwnChain(t, tester, chain.len())
   547  }
   548  
    549  // Tests that if a large batch of blocks is being downloaded, it is throttled
   550  // until the cached blocks are retrieved.
   551  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   552  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   553  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   554  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   555  func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }
   556  func TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) }
   557  
   558  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   559  	t.Parallel()
   560  	tester := newTester()
   561  
   562  	// Create a long block chain to download and the tester
   563  	targetBlocks := testChainBase.len() - 1
   564  	tester.newPeer("peer", protocol, testChainBase)
   565  
   566  	// Wrap the importer to allow stepping
   567  	blocked, proceed := uint32(0), make(chan struct{})
   568  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   569  		atomic.StoreUint32(&blocked, uint32(len(results)))
   570  		<-proceed
   571  	}
   572  	// Start a synchronisation concurrently
   573  	errc := make(chan error)
   574  	go func() {
   575  		errc <- tester.sync("peer", nil, mode)
   576  	}()
   577  	// Iteratively take some blocks, always checking the retrieval count
   578  	for {
    579  		// Check the retrieval count synchronously (the reason for this ugly block)
   580  		tester.lock.RLock()
   581  		retrieved := len(tester.ownBlocks)
   582  		tester.lock.RUnlock()
   583  		if retrieved >= targetBlocks+1 {
   584  			break
   585  		}
   586  		// Wait a bit for sync to throttle itself
   587  		var cached, frozen int
   588  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   589  			time.Sleep(25 * time.Millisecond)
   590  
   591  			tester.lock.Lock()
   592  			{
   593  				tester.downloader.queue.resultCache.lock.Lock()
   594  				cached = tester.downloader.queue.resultCache.countCompleted()
   595  				tester.downloader.queue.resultCache.lock.Unlock()
   596  				frozen = int(atomic.LoadUint32(&blocked))
   597  				retrieved = len(tester.ownBlocks)
   598  
   599  			}
   600  			tester.lock.Unlock()
   601  
   602  			if cached == blockCacheMaxItems ||
   603  				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
   604  				retrieved+cached+frozen == targetBlocks+1 ||
   605  				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   606  				break
   607  			}
   608  		}
   609  		// Make sure we filled up the cache, then exhaust it
   610  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   611  		tester.lock.RLock()
   612  		retrieved = len(tester.ownBlocks)
   613  		tester.lock.RUnlock()
   614  		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   615  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
   616  		}
   617  
   618  		// Permit the blocked blocks to import
   619  		if atomic.LoadUint32(&blocked) > 0 {
   620  			atomic.StoreUint32(&blocked, uint32(0))
   621  			proceed <- struct{}{}
   622  		}
   623  	}
   624  	// Check that we haven't pulled more blocks than available
   625  	assertOwnChain(t, tester, targetBlocks+1)
   626  	if err := <-errc; err != nil {
   627  		t.Fatalf("block synchronization failed: %v", err)
   628  	}
   629  	tester.terminate()
   630  
   631  }
   632  
   633  // Tests that simple synchronization against a forked chain works correctly. In
    634  // this test common ancestor lookup should *not* be short-circuited, and a full
   635  // binary search should be executed.
   636  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   637  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   638  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   639  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   640  func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, 65, FullSync) }
   641  func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, 65, FastSync) }
   642  func TestForkedSync65Light(t *testing.T) { testForkedSync(t, 65, LightSync) }
   643  
   644  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   645  	t.Parallel()
   646  
   647  	tester := newTester()
   648  	defer tester.terminate()
   649  
   650  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   651  	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
   652  	tester.newPeer("fork A", protocol, chainA)
   653  	tester.newPeer("fork B", protocol, chainB)
   654  	// Synchronise with the peer and make sure all blocks were retrieved
   655  	if err := tester.sync("fork A", nil, mode); err != nil {
   656  		t.Fatalf("failed to synchronise blocks: %v", err)
   657  	}
   658  	assertOwnChain(t, tester, chainA.len())
   659  
   660  	// Synchronise with the second peer and make sure that fork is pulled too
   661  	if err := tester.sync("fork B", nil, mode); err != nil {
   662  		t.Fatalf("failed to synchronise blocks: %v", err)
   663  	}
   664  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   665  }
   666  
    667  // Tests that synchronising against a much shorter but much heavier fork works
    668  // correctly and is not dropped.
   669  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   670  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   671  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   672  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   673  func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, 65, FullSync) }
   674  func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, 65, FastSync) }
   675  func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, 65, LightSync) }
   676  
   677  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   678  	t.Parallel()
   679  
   680  	tester := newTester()
   681  	defer tester.terminate()
   682  
   683  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   684  	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
   685  	tester.newPeer("light", protocol, chainA)
   686  	tester.newPeer("heavy", protocol, chainB)
   687  
   688  	// Synchronise with the peer and make sure all blocks were retrieved
   689  	if err := tester.sync("light", nil, mode); err != nil {
   690  		t.Fatalf("failed to synchronise blocks: %v", err)
   691  	}
   692  	assertOwnChain(t, tester, chainA.len())
   693  
   694  	// Synchronise with the second peer and make sure that fork is pulled too
   695  	if err := tester.sync("heavy", nil, mode); err != nil {
   696  		t.Fatalf("failed to synchronise blocks: %v", err)
   697  	}
   698  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   699  }
   700  
   701  // Tests that chain forks are contained within a certain interval of the current
   702  // chain head, ensuring that malicious peers cannot waste resources by feeding
   703  // long dead chains.
   704  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   705  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   706  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   707  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   708  func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, 65, FullSync) }
   709  func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, 65, FastSync) }
   710  func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, 65, LightSync) }
   711  
   712  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   713  	t.Parallel()
   714  
   715  	tester := newTester()
   716  	defer tester.terminate()
   717  
   718  	chainA := testChainForkLightA
   719  	chainB := testChainForkLightB
   720  	tester.newPeer("original", protocol, chainA)
   721  	tester.newPeer("rewriter", protocol, chainB)
   722  
   723  	// Synchronise with the peer and make sure all blocks were retrieved
   724  	if err := tester.sync("original", nil, mode); err != nil {
   725  		t.Fatalf("failed to synchronise blocks: %v", err)
   726  	}
   727  	assertOwnChain(t, tester, chainA.len())
   728  
    729  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   730  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   731  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   732  	}
   733  }
   734  
   735  // Tests that chain forks are contained within a certain interval of the current
   736  // chain head for short but heavy forks too. These are a bit special because they
   737  // take different ancestor lookup paths.
   738  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   739  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   740  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   741  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   742  func TestBoundedHeavyForkedSync65Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FullSync) }
   743  func TestBoundedHeavyForkedSync65Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FastSync) }
   744  func TestBoundedHeavyForkedSync65Light(t *testing.T) { testBoundedHeavyForkedSync(t, 65, LightSync) }
   745  
   746  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   747  	t.Parallel()
   748  	tester := newTester()
   749  
   750  	// Create a long enough forked chain
   751  	chainA := testChainForkLightA
   752  	chainB := testChainForkHeavy
   753  	tester.newPeer("original", protocol, chainA)
   754  
   755  	// Synchronise with the peer and make sure all blocks were retrieved
   756  	if err := tester.sync("original", nil, mode); err != nil {
   757  		t.Fatalf("failed to synchronise blocks: %v", err)
   758  	}
   759  	assertOwnChain(t, tester, chainA.len())
   760  
   761  	tester.newPeer("heavy-rewriter", protocol, chainB)
    762  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   763  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   764  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   765  	}
   766  	tester.terminate()
   767  }
   768  
   769  // Tests that an inactive downloader will not accept incoming block headers,
   770  // bodies and receipts.
   771  func TestInactiveDownloader63(t *testing.T) {
   772  	t.Parallel()
   773  
   774  	tester := newTester()
   775  	defer tester.terminate()
   776  
   777  	// Check that neither block headers nor bodies are accepted
   778  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   779  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   780  	}
   781  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   782  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   783  	}
   784  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   785  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   786  	}
   787  }
   788  
   789  // Tests that a canceled download wipes all previously accumulated state.
   790  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   791  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   792  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   793  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   794  func TestCancel65Full(t *testing.T)  { testCancel(t, 65, FullSync) }
   795  func TestCancel65Fast(t *testing.T)  { testCancel(t, 65, FastSync) }
   796  func TestCancel65Light(t *testing.T) { testCancel(t, 65, LightSync) }
   797  
   798  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   799  	t.Parallel()
   800  
   801  	tester := newTester()
   802  	defer tester.terminate()
   803  
   804  	chain := testChainBase.shorten(MaxHeaderFetch)
   805  	tester.newPeer("peer", protocol, chain)
   806  
   807  	// Make sure canceling works with a pristine downloader
   808  	tester.downloader.Cancel()
   809  	if !tester.downloader.queue.Idle() {
   810  		t.Errorf("download queue not idle")
   811  	}
   812  	// Synchronise with the peer, but cancel afterwards
   813  	if err := tester.sync("peer", nil, mode); err != nil {
   814  		t.Fatalf("failed to synchronise blocks: %v", err)
   815  	}
   816  	tester.downloader.Cancel()
   817  	if !tester.downloader.queue.Idle() {
   818  		t.Errorf("download queue not idle")
   819  	}
   820  }
   821  
    822  // Tests that synchronisation from multiple peers works as intended (multi-thread sanity test).
   823  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   824  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   825  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   826  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   827  func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, 65, FullSync) }
   828  func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, 65, FastSync) }
   829  func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, 65, LightSync) }
   830  
   831  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   832  	t.Parallel()
   833  
   834  	tester := newTester()
   835  	defer tester.terminate()
   836  
   837  	// Create various peers with various parts of the chain
   838  	targetPeers := 8
   839  	chain := testChainBase.shorten(targetPeers * 100)
   840  
   841  	for i := 0; i < targetPeers; i++ {
   842  		id := fmt.Sprintf("peer #%d", i)
   843  		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
   844  	}
   845  	if err := tester.sync("peer #0", nil, mode); err != nil {
   846  		t.Fatalf("failed to synchronise blocks: %v", err)
   847  	}
   848  	assertOwnChain(t, tester, chain.len())
   849  }
   850  
   851  // Tests that synchronisations behave well in multi-version protocol environments
    852  // and do not wreak havoc on other nodes in the network.
   853  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
   854  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
   855  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
   856  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
   857  func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, 65, FullSync) }
   858  func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, 65, FastSync) }
   859  func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, 65, LightSync) }
   860  
   861  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
   862  	t.Parallel()
   863  
   864  	tester := newTester()
   865  	defer tester.terminate()
   866  
   867  	// Create a small enough block chain to download
   868  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   869  
   870  	// Create peers of every type
   871  	tester.newPeer("peer 63", 63, chain)
   872  	tester.newPeer("peer 64", 64, chain)
   873  	tester.newPeer("peer 65", 65, chain)
   874  
   875  	// Synchronise with the requested peer and make sure all blocks were retrieved
   876  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
   877  		t.Fatalf("failed to synchronise blocks: %v", err)
   878  	}
   879  	assertOwnChain(t, tester, chain.len())
   880  
   881  	// Check that no peers have been dropped off
   882  	for _, version := range []int{63, 64, 65} {
   883  		peer := fmt.Sprintf("peer %d", version)
   884  		if _, ok := tester.peers[peer]; !ok {
   885  			t.Errorf("%s dropped", peer)
   886  		}
   887  	}
   888  }
   889  
   890  // Tests that if a block is empty (e.g. header only), no body request should be
    891  // made, and instead the header should be assembled into a whole block on its own.
   892  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
   893  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
   894  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
   895  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
   896  func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, 65, FullSync) }
   897  func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, 65, FastSync) }
   898  func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, 65, LightSync) }
   899  
   900  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
   901  	t.Parallel()
   902  
   903  	tester := newTester()
   904  	defer tester.terminate()
   905  
   906  	// Create a block chain to download
   907  	chain := testChainBase
   908  	tester.newPeer("peer", protocol, chain)
   909  
   910  	// Instrument the downloader to signal body requests
   911  	bodiesHave, receiptsHave := int32(0), int32(0)
   912  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   913  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
   914  	}
   915  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   916  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
   917  	}
   918  	// Synchronise with the peer and make sure all blocks were retrieved
   919  	if err := tester.sync("peer", nil, mode); err != nil {
   920  		t.Fatalf("failed to synchronise blocks: %v", err)
   921  	}
   922  	assertOwnChain(t, tester, chain.len())
   923  
   924  	// Validate the number of block bodies that should have been requested
   925  	bodiesNeeded, receiptsNeeded := 0, 0
   926  	for _, block := range chain.blockm {
   927  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   928  			bodiesNeeded++
   929  		}
   930  	}
   931  	for _, receipt := range chain.receiptm {
   932  		if mode == FastSync && len(receipt) > 0 {
   933  			receiptsNeeded++
   934  		}
   935  	}
   936  	if int(bodiesHave) != bodiesNeeded {
   937  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
   938  	}
   939  	if int(receiptsHave) != receiptsNeeded {
   940  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
   941  	}
   942  }
   943  
   944  // Tests that headers are enqueued continuously, preventing malicious nodes from
   945  // stalling the downloader by feeding gapped header chains.
   946  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
   947  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
   948  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
   949  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
   950  func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, 65, FullSync) }
   951  func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, 65, FastSync) }
   952  func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, 65, LightSync) }
   953  
   954  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
   955  	t.Parallel()
   956  
   957  	tester := newTester()
   958  	defer tester.terminate()
   959  
   960  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   961  	brokenChain := chain.shorten(chain.len())
   962  	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
   963  	tester.newPeer("attack", protocol, brokenChain)
   964  
   965  	if err := tester.sync("attack", nil, mode); err == nil {
   966  		t.Fatalf("succeeded attacker synchronisation")
   967  	}
   968  	// Synchronise with the valid peer and make sure sync succeeds
   969  	tester.newPeer("valid", protocol, chain)
   970  	if err := tester.sync("valid", nil, mode); err != nil {
   971  		t.Fatalf("failed to synchronise blocks: %v", err)
   972  	}
   973  	assertOwnChain(t, tester, chain.len())
   974  }
   975  
    976  // Tests that if requested headers are shifted (i.e. the first one is missing), the queue
   977  // detects the invalid numbering.
   978  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
   979  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
   980  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
   981  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
   982  func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, 65, FullSync) }
   983  func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, 65, FastSync) }
   984  func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, 65, LightSync) }
   985  
   986  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
   987  	t.Parallel()
   988  
   989  	tester := newTester()
   990  	defer tester.terminate()
   991  
   992  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   993  
   994  	// Attempt a full sync with an attacker feeding shifted headers
   995  	brokenChain := chain.shorten(chain.len())
   996  	delete(brokenChain.headerm, brokenChain.chain[1])
   997  	delete(brokenChain.blockm, brokenChain.chain[1])
   998  	delete(brokenChain.receiptm, brokenChain.chain[1])
   999  	tester.newPeer("attack", protocol, brokenChain)
  1000  	if err := tester.sync("attack", nil, mode); err == nil {
  1001  		t.Fatalf("succeeded attacker synchronisation")
  1002  	}
  1003  
  1004  	// Synchronise with the valid peer and make sure sync succeeds
  1005  	tester.newPeer("valid", protocol, chain)
  1006  	if err := tester.sync("valid", nil, mode); err != nil {
  1007  		t.Fatalf("failed to synchronise blocks: %v", err)
  1008  	}
  1009  	assertOwnChain(t, tester, chain.len())
  1010  }
  1011  
  1012  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1013  // for various failure scenarios. Afterwards a full sync is attempted to make
  1014  // sure no state was corrupted.
  1015  func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) }
  1016  func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
  1017  func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) }
  1018  
  1019  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1020  	t.Parallel()
  1021  
  1022  	tester := newTester()
  1023  
  1024  	// Create a small enough block chain to download
  1025  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1026  	chain := testChainBase.shorten(targetBlocks)
  1027  
  1028  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1029  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1030  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1031  	fastAttackChain := chain.shorten(chain.len())
  1032  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
  1033  	tester.newPeer("fast-attack", protocol, fastAttackChain)
  1034  
  1035  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1036  		t.Fatalf("succeeded fast attacker synchronisation")
  1037  	}
  1038  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1039  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1040  	}
  1041  
  1042  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1043  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1044  	// rolled back, and also the pivot point being reverted to a non-block status.
  1045  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1046  	blockAttackChain := chain.shorten(chain.len())
  1047  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
  1048  	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
  1049  	tester.newPeer("block-attack", protocol, blockAttackChain)
  1050  
  1051  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1052  		t.Fatalf("succeeded block attacker synchronisation")
  1053  	}
  1054  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1055  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1056  	}
  1057  	if mode == FastSync {
  1058  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1059  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1060  		}
  1061  	}
  1062  
  1063  	// Attempt to sync with an attacker that withholds promised blocks after the
   1064  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1065  	// but already imported pivot block.
  1066  	withholdAttackChain := chain.shorten(chain.len())
  1067  	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
  1068  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1069  		for i := missing; i < withholdAttackChain.len(); i++ {
  1070  			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
  1071  		}
  1072  		tester.downloader.syncInitHook = nil
  1073  	}
  1074  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1075  		t.Fatalf("succeeded withholding attacker synchronisation")
  1076  	}
  1077  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1078  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1079  	}
  1080  	if mode == FastSync {
  1081  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1082  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1083  		}
  1084  	}
  1085  
   1086  	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
  1087  	// should also disable fast syncing for this process, verify that we did a fresh full
  1088  	// sync. Note, we can't assert anything about the receipts since we won't purge the
  1089  	// database of them, hence we can't use assertOwnChain.
  1090  	tester.newPeer("valid", protocol, chain)
  1091  	if err := tester.sync("valid", nil, mode); err != nil {
  1092  		t.Fatalf("failed to synchronise blocks: %v", err)
  1093  	}
  1094  	if hs := len(tester.ownHeaders); hs != chain.len() {
  1095  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
  1096  	}
  1097  	if mode != LightSync {
  1098  		if bs := len(tester.ownBlocks); bs != chain.len() {
  1099  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
  1100  		}
  1101  	}
  1102  	tester.terminate()
  1103  }
  1104  
  1105  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1106  // afterwards by not sending any useful hashes.
  1107  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1108  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1109  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1110  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1111  func TestHighTDStarvationAttack65Full(t *testing.T)  { testHighTDStarvationAttack(t, 65, FullSync) }
  1112  func TestHighTDStarvationAttack65Fast(t *testing.T)  { testHighTDStarvationAttack(t, 65, FastSync) }
  1113  func TestHighTDStarvationAttack65Light(t *testing.T) { testHighTDStarvationAttack(t, 65, LightSync) }
  1114  
  1115  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1116  	t.Parallel()
  1117  
  1118  	tester := newTester()
  1119  
  1120  	chain := testChainBase.shorten(1)
  1121  	tester.newPeer("attack", protocol, chain)
  1122  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1123  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1124  	}
  1125  	tester.terminate()
  1126  }
  1127  
  1128  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1129  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1130  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1131  func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }
  1132  
  1133  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1134  	t.Parallel()
  1135  
  1136  	// Define the disconnection requirement for individual hash fetch errors
  1137  	tests := []struct {
  1138  		result error
  1139  		drop   bool
  1140  	}{
  1141  		{nil, false},                        // Sync succeeded, all is well
  1142  		{errBusy, false},                    // Sync is already in progress, no problem
  1143  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1144  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1145  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1146  		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
  1147  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1148  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1149  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1150  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1151  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1152  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1153  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1154  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1155  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1156  	}
  1157  	// Run the tests and check disconnection status
  1158  	tester := newTester()
  1159  	defer tester.terminate()
  1160  	chain := testChainBase.shorten(1)
  1161  
  1162  	for i, tt := range tests {
  1163  		// Register a new peer and ensure its presence
  1164  		id := fmt.Sprintf("test %d", i)
  1165  		if err := tester.newPeer(id, protocol, chain); err != nil {
  1166  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1167  		}
  1168  		if _, ok := tester.peers[id]; !ok {
  1169  			t.Fatalf("test %d: registered peer not found", i)
  1170  		}
  1171  		// Simulate a synchronisation and check the required result
  1172  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1173  
  1174  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1175  		if _, ok := tester.peers[id]; !ok != tt.drop {
  1176  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1177  		}
  1178  	}
  1179  }
  1180  
  1181  // Tests that synchronisation progress (origin block number, current block number
  1182  // and highest block number) is tracked and updated correctly.
  1183  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1184  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1185  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1186  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1187  func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, 65, FullSync) }
  1188  func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, 65, FastSync) }
  1189  func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, 65, LightSync) }
  1190  
  1191  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1192  	t.Parallel()
  1193  
  1194  	tester := newTester()
  1195  	defer tester.terminate()
  1196  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1197  
  1198  	// Set a sync init hook to catch progress changes
  1199  	starting := make(chan struct{})
  1200  	progress := make(chan struct{})
  1201  
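        	// The hook signals on starting and then blocks on progress, pausing the sync so
        	// the test can inspect intermediate progress values at a known point.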
  1202  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1203  		starting <- struct{}{}
  1204  		<-progress
  1205  	}
  1206  	checkProgress(t, tester.downloader, "pristine", core.SyncProgress{})
  1207  
  1208  	// Synchronise half the blocks and check initial progress
  1209  	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
  1210  	pending := new(sync.WaitGroup)
  1211  	pending.Add(1)
  1212  
  1213  	go func() {
  1214  		defer pending.Done()
  1215  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1216  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1217  		}
  1218  	}()
  1219  	<-starting
  1220  	checkProgress(t, tester.downloader, "initial", core.SyncProgress{
  1221  		HighestBlock: uint64(chain.len()/2 - 1),
  1222  	})
  1223  	progress <- struct{}{}
  1224  	pending.Wait()
  1225  
  1226  	// Synchronise all the blocks and check continuation progress
  1227  	tester.newPeer("peer-full", protocol, chain)
  1228  	pending.Add(1)
  1229  	go func() {
  1230  		defer pending.Done()
  1231  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1232  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1233  		}
  1234  	}()
  1235  	<-starting
  1236  	checkProgress(t, tester.downloader, "completing", core.SyncProgress{
  1237  		StartingBlock: uint64(chain.len()/2 - 1),
  1238  		CurrentBlock:  uint64(chain.len()/2 - 1),
  1239  		HighestBlock:  uint64(chain.len() - 1),
  1240  	})
  1241  
  1242  	// Check final progress after successful sync
  1243  	progress <- struct{}{}
  1244  	pending.Wait()
  1245  	checkProgress(t, tester.downloader, "final", core.SyncProgress{
  1246  		StartingBlock: uint64(chain.len()/2 - 1),
  1247  		CurrentBlock:  uint64(chain.len() - 1),
  1248  		HighestBlock:  uint64(chain.len() - 1),
  1249  	})
  1250  }
  1251  
  1252  func checkProgress(t *testing.T, d *Downloader, stage string, want core.SyncProgress) {
  1253  	// Mark this method as a helper to report errors at callsite, not in here
  1254  	t.Helper()
  1255  
  1256  	p := d.Progress()
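        	// Ignore the state-sync counters on both sides; these progress tests only
        	// assert on block-level numbers.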
  1257  	p.KnownStates, p.PulledStates = 0, 0
  1258  	want.KnownStates, want.PulledStates = 0, 0
  1259  	if p != want {
  1260  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1261  	}
  1262  }
  1263  
  1264  // Tests that synchronisation progress (origin block number and highest block
  1265  // number) is tracked and updated correctly in case of a fork (or manual head
  1266  // reversion).
  1267  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1268  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1269  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1270  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1271  func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, 65, FullSync) }
  1272  func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, 65, FastSync) }
  1273  func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, 65, LightSync) }
  1274  
  1275  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1276  	t.Parallel()
  1277  
  1278  	tester := newTester()
  1279  	defer tester.terminate()
  1280  	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
  1281  	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)
  1282  
  1283  	// Set a sync init hook to catch progress changes
  1284  	starting := make(chan struct{})
  1285  	progress := make(chan struct{})
  1286  
  1287  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1288  		starting <- struct{}{}
  1289  		<-progress
  1290  	}
  1291  	checkProgress(t, tester.downloader, "pristine", core.SyncProgress{})
  1292  
  1293  	// Synchronise with one of the forks and check progress
  1294  	tester.newPeer("fork A", protocol, chainA)
  1295  	pending := new(sync.WaitGroup)
  1296  	pending.Add(1)
  1297  	go func() {
  1298  		defer pending.Done()
  1299  		if err := tester.sync("fork A", nil, mode); err != nil {
  1300  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1301  		}
  1302  	}()
  1303  	<-starting
  1304  
  1305  	checkProgress(t, tester.downloader, "initial", core.SyncProgress{
  1306  		HighestBlock: uint64(chainA.len() - 1),
  1307  	})
  1308  	progress <- struct{}{}
  1309  	pending.Wait()
  1310  
  1311  	// Simulate a successful sync above the fork
  1312  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1313  
  1314  	// Synchronise with the second fork and check progress resets
  1315  	tester.newPeer("fork B", protocol, chainB)
  1316  	pending.Add(1)
  1317  	go func() {
  1318  		defer pending.Done()
  1319  		if err := tester.sync("fork B", nil, mode); err != nil {
  1320  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1321  		}
  1322  	}()
  1323  	<-starting
  1324  	checkProgress(t, tester.downloader, "forking", core.SyncProgress{
  1325  		StartingBlock: uint64(testChainBase.len()) - 1,
  1326  		CurrentBlock:  uint64(chainA.len() - 1),
  1327  		HighestBlock:  uint64(chainB.len() - 1),
  1328  	})
  1329  
  1330  	// Check final progress after successful sync
  1331  	progress <- struct{}{}
  1332  	pending.Wait()
  1333  	checkProgress(t, tester.downloader, "final", core.SyncProgress{
  1334  		StartingBlock: uint64(testChainBase.len()) - 1,
  1335  		CurrentBlock:  uint64(chainB.len() - 1),
  1336  		HighestBlock:  uint64(chainB.len() - 1),
  1337  	})
  1338  }
  1339  
  1340  // Tests that if synchronisation is aborted due to some failure, then the progress
  1341  // origin is not updated in the next sync cycle, as it should be considered the
  1342  // continuation of the previous sync and not a new instance.
  1343  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1344  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1345  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1346  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1347  func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, 65, FullSync) }
  1348  func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, 65, FastSync) }
  1349  func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, 65, LightSync) }
  1350  
  1351  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1352  	t.Parallel()
  1353  
  1354  	tester := newTester()
  1355  	defer tester.terminate()
  1356  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1357  
  1358  	// Set a sync init hook to catch progress changes
  1359  	starting := make(chan struct{})
  1360  	progress := make(chan struct{})
  1361  
  1362  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1363  		starting <- struct{}{}
  1364  		<-progress
  1365  	}
  1366  	checkProgress(t, tester.downloader, "pristine", core.SyncProgress{})
  1367  
  1368  	// Attempt a full sync with a faulty peer
  1369  	brokenChain := chain.shorten(chain.len())
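        	// Knock out a block from the middle of the chain so delivery fails part-way
        	// through and the sync aborts.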
  1370  	missing := brokenChain.len() / 2
  1371  	delete(brokenChain.headerm, brokenChain.chain[missing])
  1372  	delete(brokenChain.blockm, brokenChain.chain[missing])
  1373  	delete(brokenChain.receiptm, brokenChain.chain[missing])
  1374  	tester.newPeer("faulty", protocol, brokenChain)
  1375  
  1376  	pending := new(sync.WaitGroup)
  1377  	pending.Add(1)
  1378  	go func() {
  1379  		defer pending.Done()
  1380  		if err := tester.sync("faulty", nil, mode); err == nil {
  1381  			panic("succeeded faulty synchronisation")
  1382  		}
  1383  	}()
  1384  	<-starting
  1385  	checkProgress(t, tester.downloader, "initial", core.SyncProgress{
  1386  		HighestBlock: uint64(brokenChain.len() - 1),
  1387  	})
  1388  	progress <- struct{}{}
  1389  	pending.Wait()
  1390  	afterFailedSync := tester.downloader.Progress()
  1391  
  1392  	// Synchronise with a good peer and check that the progress origin remains the same
  1393  	// after a failure
  1394  	tester.newPeer("valid", protocol, chain)
  1395  	pending.Add(1)
  1396  	go func() {
  1397  		defer pending.Done()
  1398  		if err := tester.sync("valid", nil, mode); err != nil {
  1399  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1400  		}
  1401  	}()
  1402  	<-starting
  1403  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1404  
  1405  	// Check final progress after successful sync
  1406  	progress <- struct{}{}
  1407  	pending.Wait()
  1408  	checkProgress(t, tester.downloader, "final", core.SyncProgress{
  1409  		CurrentBlock: uint64(chain.len() - 1),
  1410  		HighestBlock: uint64(chain.len() - 1),
  1411  	})
  1412  }
  1413  
  1414  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1415  // the progress height is successfully reduced at the next sync invocation.
  1416  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1417  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1418  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1419  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1420  func TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, 65, FullSync) }
  1421  func TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, 65, FastSync) }
  1422  func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, 65, LightSync) }
  1423  
  1424  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1425  	t.Parallel()
  1426  
  1427  	tester := newTester()
  1428  	defer tester.terminate()
  1429  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1430  
  1431  	// Set a sync init hook to catch progress changes
  1432  	starting := make(chan struct{})
  1433  	progress := make(chan struct{})
  1434  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1435  		starting <- struct{}{}
  1436  		<-progress
  1437  	}
  1438  	checkProgress(t, tester.downloader, "pristine", core.SyncProgress{})
  1439  
  1440  	// Create and sync with an attacker that promises a higher chain than available.
  1441  	brokenChain := chain.shorten(chain.len())
  1442  	numMissing := 5
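        	// Drop a few headers just below the head: the advertised height stays the same,
        	// but the peer cannot deliver the promised blocks.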
  1443  	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
  1444  		delete(brokenChain.headerm, brokenChain.chain[i])
  1445  	}
  1446  	tester.newPeer("attack", protocol, brokenChain)
  1447  
  1448  	pending := new(sync.WaitGroup)
  1449  	pending.Add(1)
  1450  	go func() {
  1451  		defer pending.Done()
  1452  		if err := tester.sync("attack", nil, mode); err == nil {
  1453  			panic("succeeded attacker synchronisation")
  1454  		}
  1455  	}()
  1456  	<-starting
  1457  	checkProgress(t, tester.downloader, "initial", core.SyncProgress{
  1458  		HighestBlock: uint64(brokenChain.len() - 1),
  1459  	})
  1460  	progress <- struct{}{}
  1461  	pending.Wait()
  1462  	afterFailedSync := tester.downloader.Progress()
  1463  
  1464  	// Synchronise with a good peer and check that the progress height has been reduced to
  1465  	// the true value.
  1466  	validChain := chain.shorten(chain.len() - numMissing)
  1467  	tester.newPeer("valid", protocol, validChain)
  1468  	pending.Add(1)
  1469  
  1470  	go func() {
  1471  		defer pending.Done()
  1472  		if err := tester.sync("valid", nil, mode); err != nil {
  1473  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1474  		}
  1475  	}()
  1476  	<-starting
  1477  	checkProgress(t, tester.downloader, "completing", core.SyncProgress{
  1478  		CurrentBlock: afterFailedSync.CurrentBlock,
  1479  		HighestBlock: uint64(validChain.len() - 1),
  1480  	})
  1481  
  1482  	// Check final progress after successful sync.
  1483  	progress <- struct{}{}
  1484  	pending.Wait()
  1485  	checkProgress(t, tester.downloader, "final", core.SyncProgress{
  1486  		CurrentBlock: uint64(validChain.len() - 1),
  1487  		HighestBlock: uint64(validChain.len() - 1),
  1488  	})
  1489  }
  1490  
  1491  // This test reproduces an issue where unexpected deliveries would
  1492  // block indefinitely if they arrived at the right time.
  1493  func TestDeliverHeadersHang(t *testing.T) {
  1494  	t.Parallel()
  1495  
  1496  	testCases := []struct {
  1497  		protocol int
  1498  		syncMode SyncMode
  1499  	}{
  1500  		{63, FullSync},
  1501  		{63, FastSync},
  1502  		{64, FullSync},
  1503  		{64, FastSync},
  1504  		{64, LightSync},
  1505  		{65, FullSync},
  1506  		{65, FastSync},
  1507  		{65, LightSync},
  1508  	}
  1509  	for _, tc := range testCases {
  1510  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1511  			t.Parallel()
  1512  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1513  		})
  1514  	}
  1515  }
  1516  
  1517  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1518  	master := newTester()
  1519  	defer master.terminate()
  1520  	chain := testChainBase.shorten(15)
  1521  
  1522  	for i := 0; i < 200; i++ {
  1523  		tester := newTester()
  1524  		tester.peerDb = master.peerDb
  1525  		tester.newPeer("peer", protocol, chain)
  1526  
  1527  		// Whenever the downloader requests headers, flood it with
  1528  		// a lot of unrequested header deliveries.
  1529  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1530  			peer:   tester.downloader.peers.peers["peer"].peer,
  1531  			tester: tester,
  1532  		}
  1533  		if err := tester.sync("peer", nil, mode); err != nil {
  1534  			t.Errorf("test %d: sync failed: %v", i, err)
  1535  		}
  1536  		tester.terminate()
  1537  	}
  1538  }
  1539  
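        // floodingTestPeer proxies to a regular test peer, but floods the downloader with
        // unsolicited header deliveries whenever headers are requested by number.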
  1540  type floodingTestPeer struct {
  1541  	peer   Peer
  1542  	tester *downloadTester
  1543  }
  1544  
  1545  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1546  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1547  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1548  }
  1549  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1550  	return ftp.peer.RequestBodies(hashes)
  1551  }
  1552  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1553  	return ftp.peer.RequestReceipts(hashes)
  1554  }
  1555  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1556  	return ftp.peer.RequestNodeData(hashes)
  1557  }
  1558  
  1559  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
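        	// Kick off a swarm of fake peers, each concurrently delivering a few
        	// unrequested headers before the real request is served.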
  1560  	deliveriesDone := make(chan struct{}, 500)
  1561  	for i := 0; i < cap(deliveriesDone)-1; i++ {
  1562  		peer := fmt.Sprintf("fake-peer%d", i)
  1563  		go func() {
  1564  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1565  			deliveriesDone <- struct{}{}
  1566  		}()
  1567  	}
  1568  
  1569  	// None of the extra deliveries should block.
  1570  	timeout := time.After(60 * time.Second)
  1571  	launched := false
  1572  	for i := 0; i < cap(deliveriesDone); i++ {
  1573  		select {
  1574  		case <-deliveriesDone:
  1575  			if !launched {
  1576  				// Start delivering the requested headers
  1577  				// after one of the flooding responses has arrived.
  1578  				go func() {
  1579  					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1580  					deliveriesDone <- struct{}{}
  1581  				}()
  1582  				launched = true
  1583  			}
  1584  		case <-timeout:
  1585  			panic("blocked")
  1586  		}
  1587  	}
  1588  	return nil
  1589  }
  1590  
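        // Tests that calculateRequestSpan produces sensible header request batches for
        // various combinations of remote and local chain heights.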
  1591  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1592  	testCases := []struct {
  1593  		remoteHeight uint64
  1594  		localHeight  uint64
  1595  		expected     []int
  1596  	}{
  1597  		// Remote is way higher. We should ask for the remote head and go backwards
  1598  		{1500, 1000,
  1599  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1600  		},
  1601  		{15000, 13006,
  1602  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1603  		},
  1604  		// Remote is pretty close to us. We don't have to fetch as many
  1605  		{1200, 1150,
  1606  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1607  		},
  1608  		// Remote is equal to us (so on a fork with higher td)
  1609  		// We should get the closest couple of ancestors
  1610  		{1500, 1500,
  1611  			[]int{1497, 1499},
  1612  		},
  1613  		// We're higher than the remote! Odd
  1614  		{1000, 1500,
  1615  			[]int{997, 999},
  1616  		},
  1617  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1618  		{0, 1500,
  1619  			[]int{0, 2},
  1620  		},
  1621  		{6000000, 0,
  1622  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1623  		},
  1624  		{0, 0,
  1625  			[]int{0, 2},
  1626  		},
  1627  	}
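        	// reqs expands a (from, count, span) result into the concrete block numbers
        	// that would be requested, for comparison with the expected values.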
  1628  	reqs := func(from, count, span int) []int {
  1629  		var r []int
  1630  		num := from
  1631  		for len(r) < count {
  1632  			r = append(r, num)
  1633  			num += span + 1
  1634  		}
  1635  		return r
  1636  	}
  1637  	for i, tt := range testCases {
  1638  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1639  		data := reqs(int(from), count, span)
  1640  
  1641  		if max != uint64(data[len(data)-1]) {
  1642  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1643  		}
  1644  		failed := false
  1645  		if len(data) != len(tt.expected) {
  1646  			failed = true
  1647  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1648  		} else {
  1649  			for j, n := range data {
  1650  				if n != tt.expected[j] {
  1651  					failed = true
  1652  					break
  1653  				}
  1654  			}
  1655  		}
  1656  		if failed {
  1657  			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
  1658  			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
  1659  			t.Logf("got: %v\n", res)
  1660  			t.Logf("exp: %v\n", exp)
  1661  			t.Errorf("test %d: wrong values", i)
  1662  		}
  1663  	}
  1664  }
  1665  
  1666  // Tests that peers below a pre-configured checkpoint block are prevented from
  1667  // being fast-synced from, avoiding potential cheap eclipse attacks.
  1668  func TestCheckpointEnforcement63Full(t *testing.T)  { testCheckpointEnforcement(t, 63, FullSync) }
  1669  func TestCheckpointEnforcement63Fast(t *testing.T)  { testCheckpointEnforcement(t, 63, FastSync) }
  1670  func TestCheckpointEnforcement64Full(t *testing.T)  { testCheckpointEnforcement(t, 64, FullSync) }
  1671  func TestCheckpointEnforcement64Fast(t *testing.T)  { testCheckpointEnforcement(t, 64, FastSync) }
  1672  func TestCheckpointEnforcement65Full(t *testing.T)  { testCheckpointEnforcement(t, 65, FullSync) }
  1673  func TestCheckpointEnforcement65Fast(t *testing.T)  { testCheckpointEnforcement(t, 65, FastSync) }
  1674  func TestCheckpointEnforcement65Light(t *testing.T) { testCheckpointEnforcement(t, 65, LightSync) }
  1675  
  1676  func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
  1677  	t.Parallel()
  1678  
  1679  	// Create a new tester with a particular hard-coded checkpoint block
  1680  	tester := newTester()
  1681  	defer tester.terminate()
  1682  
  1683  	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
  1684  	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
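        	// The peer's chain head sits below the configured checkpoint, so fast and light
        	// sync are expected to reject it with errUnsyncedPeer.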
  1685  
  1686  	// Attempt to sync with the peer and validate the result
  1687  	tester.newPeer("peer", protocol, chain)
  1688  
  1689  	var expect error
  1690  	if mode == FastSync || mode == LightSync {
  1691  		expect = errUnsyncedPeer
  1692  	}
  1693  	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
  1694  		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
  1695  	}
  1696  	if mode == FastSync || mode == LightSync {
  1697  		assertOwnChain(t, tester, 1)
  1698  	} else {
  1699  		assertOwnChain(t, tester, chain.len())
  1700  	}
  1701  }