github.com/codingfuture/orig-energi3@v0.8.4/eth/downloader/downloader_test.go

     1  // Copyright 2018 The Energi Core Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of the Energi Core library.
     4  //
     5  // The Energi Core library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The Energi Core library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package downloader
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"strings"
    25  	"sync"
    26  	"sync/atomic"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum"
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/core/types"
    33  	"github.com/ethereum/go-ethereum/ethdb"
    34  	"github.com/ethereum/go-ethereum/event"
    35  	"github.com/ethereum/go-ethereum/trie"
    36  )
    37  
    38  // Reduce some of the parameters to make the tester faster.
    39  func init() {
    40  	MaxForkAncestry = uint64(10000)
    41  	blockCacheItems = 1024
    42  	fsHeaderContCheck = 500 * time.Millisecond
    43  }
    44  
    45  // downloadTester is a test simulator for mocking out a local block chain.
    46  type downloadTester struct {
    47  	downloader *Downloader
    48  
    49  	genesis *types.Block   // Genesis block used by the tester and peers
    50  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    51  	peerDb  ethdb.Database // Database of the peers containing all data
    52  	peers   map[string]*downloadTesterPeer
    53  
    54  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    55  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    56  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    57  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    58  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    59  
    60  	lock sync.RWMutex
    61  }
    62  
    63  // newTester creates a new downloader test mocker.
    64  func newTester() *downloadTester {
    65  	tester := &downloadTester{
    66  		genesis:     testGenesis,
    67  		peerDb:      testDB,
    68  		peers:       make(map[string]*downloadTesterPeer),
    69  		ownHashes:   []common.Hash{testGenesis.Hash()},
    70  		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
    71  		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
    72  		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
    73  		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    74  	}
    75  	tester.stateDb = ethdb.NewMemDatabase()
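        	// Seed the state database with the genesis root so the genesis block
        	// counts as having its state fully available locally.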
    76  	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
    77  
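        	// The tester itself stands in for both the full- and light-chain interfaces
        	// expected by the downloader, which is why it is passed in twice below.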
    78  	tester.downloader = New(FullSync, 0, tester.stateDb, new(event.TypeMux), tester, tester, nil, tester.dropPeer)
    79  	return tester
    80  }
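        
        // A rough usage sketch, mirroring the tests further below and assuming the
        // testChainBase helper used throughout these tests is at least 100 blocks long:
        //
        //	tester := newTester()
        //	defer tester.terminate()
        //	tester.newPeer("peer", 70, testChainBase.shorten(100))
        //	if err := tester.sync("peer", nil, FullSync); err != nil {
        //		t.Fatalf("failed to synchronise blocks: %v", err)
        //	}
        //	assertOwnChain(t, tester, 100)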
    81  
    82  // terminate aborts any operations on the embedded downloader and releases all
    83  // held resources.
    84  func (dl *downloadTester) terminate() {
    85  	dl.downloader.Terminate()
    86  }
    87  
    88  // sync starts synchronizing with a remote peer, blocking until it completes.
    89  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
    90  	dl.lock.RLock()
    91  	hash := dl.peers[id].chain.headBlock().Hash()
    92  	// If no particular TD was requested, load from the peer's blockchain
    93  	if td == nil {
    94  		td = dl.peers[id].chain.td(hash)
    95  	}
    96  	dl.lock.RUnlock()
    97  
    98  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
    99  	err := dl.downloader.synchronise(id, hash, td, mode)
   100  	select {
   101  	case <-dl.downloader.cancelCh:
   102  		// Ok, downloader fully cancelled after sync cycle
   103  	default:
   104  		// Downloader is still accepting packets, can block a peer up
   105  		panic("downloader active post sync cycle") // panic will be caught by tester
   106  	}
   107  	return err
   108  }
   109  
   110  // HasHeader checks if a header is present in the tester's canonical chain.
   111  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   112  	return dl.GetHeaderByHash(hash) != nil
   113  }
   114  
   115  // HasBlock checks if a block is present in the tester's canonical chain.
   116  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   117  	return dl.GetBlockByHash(hash) != nil
   118  }
   119  
   120  // HasFastBlock checks if a block's receipts are present in the tester's canonical chain.
   121  func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
   122  	dl.lock.RLock()
   123  	defer dl.lock.RUnlock()
   124  
   125  	_, ok := dl.ownReceipts[hash]
   126  	return ok
   127  }
   128  
   129  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   130  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   131  	dl.lock.RLock()
   132  	defer dl.lock.RUnlock()
   133  
   134  	return dl.ownHeaders[hash]
   135  }
   136  
   137  // GetBlockByHash retrieves a block from the tester's canonical chain.
   138  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   139  	dl.lock.RLock()
   140  	defer dl.lock.RUnlock()
   141  
   142  	return dl.ownBlocks[hash]
   143  }
   144  
   145  // CurrentHeader retrieves the current head header from the canonical chain.
   146  func (dl *downloadTester) CurrentHeader() *types.Header {
   147  	dl.lock.RLock()
   148  	defer dl.lock.RUnlock()
   149  
   150  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   151  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   152  			return header
   153  		}
   154  	}
   155  	return dl.genesis.Header()
   156  }
   157  
   158  // CurrentBlock retrieves the current head block from the canonical chain.
   159  func (dl *downloadTester) CurrentBlock() *types.Block {
   160  	dl.lock.RLock()
   161  	defer dl.lock.RUnlock()
   162  
   163  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   164  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   165  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   166  				return block
   167  			}
   168  		}
   169  	}
   170  	return dl.genesis
   171  }
   172  
   173  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   174  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   175  	dl.lock.RLock()
   176  	defer dl.lock.RUnlock()
   177  
   178  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   179  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   180  			return block
   181  		}
   182  	}
   183  	return dl.genesis
   184  }
   185  
   186  // FastSyncCommitHead manually sets the head block to a given hash.
   187  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   188  	// For now only check that the state trie is correct
   189  	if block := dl.GetBlockByHash(hash); block != nil {
   190  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
   191  		return err
   192  	}
   193  	return fmt.Errorf("non existent block: %x", hash[:4])
   194  }
   195  
   196  // GetTd retrieves the block's total difficulty from the canonical chain.
   197  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   198  	dl.lock.RLock()
   199  	defer dl.lock.RUnlock()
   200  
   201  	return dl.ownChainTd[hash]
   202  }
   203  
   204  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   205  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
   206  	dl.lock.Lock()
   207  	defer dl.lock.Unlock()
   208  
   209  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   210  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   211  		return 0, errors.New("unknown parent")
   212  	}
   213  	for i := 1; i < len(headers); i++ {
   214  		if headers[i].ParentHash != headers[i-1].Hash() {
   215  			return i, errors.New("unknown parent")
   216  		}
   217  	}
   218  	// Do a full insert if pre-checks passed
   219  	for i, header := range headers {
   220  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   221  			continue
   222  		}
   223  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   224  			return i, errors.New("unknown parent")
   225  		}
   226  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   227  		dl.ownHeaders[header.Hash()] = header
   228  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   229  	}
   230  	return len(headers), nil
   231  }
   232  
   233  // InsertChain injects a new batch of blocks into the simulated chain.
   234  func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
   235  	dl.lock.Lock()
   236  	defer dl.lock.Unlock()
   237  
   238  	for i, block := range blocks {
   239  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   240  			return i, errors.New("unknown parent")
   241  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   242  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   243  		}
   244  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   245  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   246  			dl.ownHeaders[block.Hash()] = block.Header()
   247  		}
   248  		dl.ownBlocks[block.Hash()] = block
   249  		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
   250  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   251  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   252  	}
   253  	return len(blocks), nil
   254  }
   255  
   256  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   257  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (i int, err error) {
   258  	dl.lock.Lock()
   259  	defer dl.lock.Unlock()
   260  
   261  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   262  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   263  			return i, errors.New("unknown owner")
   264  		}
   265  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   266  			return i, errors.New("unknown parent")
   267  		}
   268  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   269  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   270  	}
   271  	return len(blocks), nil
   272  }
   273  
   274  // Rollback removes some recently added elements from the chain.
   275  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   276  	dl.lock.Lock()
   277  	defer dl.lock.Unlock()
   278  
   279  	for i := len(hashes) - 1; i >= 0; i-- {
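        		// Only truncate ownHashes when the rolled-back hash is the current chain
        		// tip; the per-hash metadata below is removed either way.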
   280  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   281  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   282  		}
   283  		delete(dl.ownChainTd, hashes[i])
   284  		delete(dl.ownHeaders, hashes[i])
   285  		delete(dl.ownReceipts, hashes[i])
   286  		delete(dl.ownBlocks, hashes[i])
   287  	}
   288  }
   289  
   290  // newPeer registers a new block download source into the downloader.
   291  func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
   292  	dl.lock.Lock()
   293  	defer dl.lock.Unlock()
   294  
   295  	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
   296  	dl.peers[id] = peer
   297  	return dl.downloader.RegisterPeer(id, version, peer)
   298  }
   299  
   300  // dropPeer simulates a hard peer removal from the connection pool.
   301  func (dl *downloadTester) dropPeer(id string) {
   302  	dl.lock.Lock()
   303  	defer dl.lock.Unlock()
   304  
   305  	delete(dl.peers, id)
   306  	dl.downloader.UnregisterPeer(id)
   307  }
   308  
   309  type downloadTesterPeer struct {
   310  	dl            *downloadTester
   311  	id            string
   312  	lock          sync.RWMutex
   313  	chain         *testChain
   314  	missingStates map[common.Hash]bool // State entries that fast sync should not return
   315  }
   316  
   317  // Head retrieves the peer's current head block hash and total
   318  // difficulty.
   319  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   320  	b := dlp.chain.headBlock()
   321  	return b.Hash(), dlp.chain.td(b.Hash())
   322  }
   323  
   324  // RequestHeadersByHash retrieves a batch of headers from the peer's chain,
   325  // starting at the given hashed origin, and delivers them asynchronously to the
   326  // downloader on behalf of this particular test peer.
   327  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   328  	if reverse {
   329  		panic("reverse header requests not supported")
   330  	}
   331  
   332  	result := dlp.chain.headersByHash(origin, amount, skip)
   333  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   334  	return nil
   335  }
   336  
   337  // RequestHeadersByNumber retrieves a batch of headers from the peer's chain,
   338  // starting at the given numbered origin, and delivers them asynchronously to
   339  // the downloader on behalf of this particular test peer.
   340  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   341  	if reverse {
   342  		panic("reverse header requests not supported")
   343  	}
   344  
   345  	result := dlp.chain.headersByNumber(origin, amount, skip)
   346  	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   347  	return nil
   348  }
   349  
   350  // RequestBodies retrieves a batch of block bodies for the given hashes from
   351  // the peer's chain and delivers them asynchronously to the downloader on
   352  // behalf of this particular test peer.
   353  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   354  	txs, uncles := dlp.chain.bodies(hashes)
   355  	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
   356  	return nil
   357  }
   358  
   359  // RequestReceipts retrieves a batch of block receipts for the given hashes
   360  // from the peer's chain and delivers them asynchronously to the downloader on
   361  // behalf of this particular test peer.
   362  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   363  	receipts := dlp.chain.receipts(hashes)
   364  	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
   365  	return nil
   366  }
   367  
   368  // RequestNodeData retrieves a batch of state trie nodes for the given hashes
   369  // from the peer's database and delivers them asynchronously to the downloader
   370  // on behalf of this particular test peer.
   371  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   372  	dlp.dl.lock.RLock()
   373  	defer dlp.dl.lock.RUnlock()
   374  
   375  	results := make([][]byte, 0, len(hashes))
   376  	for _, hash := range hashes {
   377  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
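        			// Entries flagged in missingStates are deliberately withheld to simulate
        			// a peer with incomplete state data.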
   378  			if !dlp.missingStates[hash] {
   379  				results = append(results, data)
   380  			}
   381  		}
   382  	}
   383  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   384  	return nil
   385  }
   386  
   387  // assertOwnChain checks if the local chain contains the correct number of items
   388  // of the various chain components.
   389  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   390  	// Mark this method as a helper to report errors at callsite, not in here
   391  	t.Helper()
   392  
   393  	assertOwnForkedChain(t, tester, 1, []int{length})
   394  }
   395  
   396  // assertOwnForkedChain checks if the local forked chain contains the correct
   397  // number of items of the various chain components.
   398  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   399  	// Mark this method as a helper to report errors at callsite, not in here
   400  	t.Helper()
   401  
   402  	// Initialize the counters for the first fork
   403  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
   404  
   405  	// Update the counters for each subsequent fork
   406  	for _, length := range lengths[1:] {
   407  		headers += length - common
   408  		blocks += length - common
   409  		receipts += length - common
   410  	}
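        	// E.g. with common = 3 and lengths = [5, 7], each counter ends up as 5 + (7 - 3) = 9.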
   411  	if tester.downloader.mode == LightSync {
   412  		blocks, receipts = 1, 1
   413  	}
   414  	if hs := len(tester.ownHeaders); hs != headers {
   415  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   416  	}
   417  	if bs := len(tester.ownBlocks); bs != blocks {
   418  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   419  	}
   420  	if rs := len(tester.ownReceipts); rs != receipts {
   421  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   422  	}
   423  }
   424  
   425  // Tests that simple synchronization against a canonical chain works correctly.
   426  // In this test, common ancestor lookup should be short-circuited and not require
   427  // binary searching.
   428  //func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   429  //func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   430  //func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   431  func TestCanonicalSynchronisation70Full(t *testing.T)  { testCanonicalSynchronisation(t, 70, FullSync) }
   432  func TestCanonicalSynchronisation70Fast(t *testing.T)  { testCanonicalSynchronisation(t, 70, FastSync) }
   433  func TestCanonicalSynchronisation70Light(t *testing.T) { testCanonicalSynchronisation(t, 70, LightSync) }
   434  
   435  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   436  	t.Parallel()
   437  
   438  	tester := newTester()
   439  	defer tester.terminate()
   440  
   441  	// Create a small enough block chain to download
   442  	chain := testChainBase.shorten(blockCacheItems - 15)
   443  	tester.newPeer("peer", protocol, chain)
   444  
   445  	// Synchronise with the peer and make sure all relevant data was retrieved
   446  	if err := tester.sync("peer", nil, mode); err != nil {
   447  		t.Fatalf("failed to synchronise blocks: %v", err)
   448  	}
   449  	assertOwnChain(t, tester, chain.len())
   450  }
   451  
   452  // Tests that if a large batch of blocks is being downloaded, it is throttled
   453  // until the cached blocks are retrieved.
   454  //func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   455  //func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   456  //func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   457  func TestThrottling70Full(t *testing.T) { testThrottling(t, 70, FullSync) }
   458  func TestThrottling70Fast(t *testing.T) { testThrottling(t, 70, FastSync) }
   459  
   460  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   461  	t.Parallel()
   462  	tester := newTester()
   463  	defer tester.terminate()
   464  
   465  	// Create a long block chain to download and register a peer serving it
   466  	targetBlocks := testChainBase.len() - 1
   467  	tester.newPeer("peer", protocol, testChainBase)
   468  
   469  	// Wrap the importer to allow stepping
   470  	blocked, proceed := uint32(0), make(chan struct{})
   471  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   472  		atomic.StoreUint32(&blocked, uint32(len(results)))
   473  		<-proceed
   474  	}
   475  	// Start a synchronisation concurrently
   476  	errc := make(chan error)
   477  	go func() {
   478  		errc <- tester.sync("peer", nil, mode)
   479  	}()
   480  	// Iteratively take some blocks, always checking the retrieval count
   481  	for {
   482  		// Check the retrieval count synchronously (the reason for this ugly block)
   483  		tester.lock.RLock()
   484  		retrieved := len(tester.ownBlocks)
   485  		tester.lock.RUnlock()
   486  		if retrieved >= targetBlocks+1 {
   487  			break
   488  		}
   489  		// Wait a bit for sync to throttle itself
   490  		var cached, frozen int
   491  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   492  			time.Sleep(25 * time.Millisecond)
   493  
   494  			tester.lock.Lock()
   495  			tester.downloader.queue.lock.Lock()
   496  			cached = len(tester.downloader.queue.blockDonePool)
   497  			if mode == FastSync {
   498  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   499  					cached = receipts
   500  				}
   501  			}
   502  			frozen = int(atomic.LoadUint32(&blocked))
   503  			retrieved = len(tester.ownBlocks)
   504  			tester.downloader.queue.lock.Unlock()
   505  			tester.lock.Unlock()
   506  
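        			// Accept either boundary, since the queue may hold back up to
        			// reorgProtHeaderDelay headers near the head for reorg protection.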
   507  			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   508  				break
   509  			}
   510  		}
   511  		// Make sure we filled up the cache, then exhaust it
   512  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   513  
   514  		tester.lock.RLock()
   515  		retrieved = len(tester.ownBlocks)
   516  		tester.lock.RUnlock()
   517  		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   518  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   519  		}
   520  		// Permit the blocked blocks to import
   521  		if atomic.LoadUint32(&blocked) > 0 {
   522  			atomic.StoreUint32(&blocked, uint32(0))
   523  			proceed <- struct{}{}
   524  		}
   525  	}
   526  	// Check that we haven't pulled more blocks than available
   527  	assertOwnChain(t, tester, targetBlocks+1)
   528  	if err := <-errc; err != nil {
   529  		t.Fatalf("block synchronization failed: %v", err)
   530  	}
   531  }
   532  
   533  // Tests that simple synchronization against a forked chain works correctly. In
   534  // this test, common ancestor lookup should *not* be short-circuited, and a full
   535  // binary search should be executed.
   536  //func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   537  //func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   538  //func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   539  func TestForkedSync70Full(t *testing.T)  { testForkedSync(t, 70, FullSync) }
   540  func TestForkedSync70Fast(t *testing.T)  { testForkedSync(t, 70, FastSync) }
   541  func TestForkedSync70Light(t *testing.T) { testForkedSync(t, 70, LightSync) }
   542  
   543  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   544  	t.Parallel()
   545  
   546  	tester := newTester()
   547  	defer tester.terminate()
   548  
   549  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   550  	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
   551  	tester.newPeer("fork A", protocol, chainA)
   552  	tester.newPeer("fork B", protocol, chainB)
   553  
   554  	// Synchronise with the peer and make sure all blocks were retrieved
   555  	if err := tester.sync("fork A", nil, mode); err != nil {
   556  		t.Fatalf("failed to synchronise blocks: %v", err)
   557  	}
   558  	assertOwnChain(t, tester, chainA.len())
   559  
   560  	// Synchronise with the second peer and make sure that fork is pulled too
   561  	if err := tester.sync("fork B", nil, mode); err != nil {
   562  		t.Fatalf("failed to synchronise blocks: %v", err)
   563  	}
   564  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   565  }
   566  
   567  // Tests that synchronising against a much shorter but much heavier fork works
   568  // correctly and is not dropped.
   569  //func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   570  //func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   571  //func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   572  func TestHeavyForkedSync70Full(t *testing.T)  { testHeavyForkedSync(t, 70, FullSync) }
   573  func TestHeavyForkedSync70Fast(t *testing.T)  { testHeavyForkedSync(t, 70, FastSync) }
   574  func TestHeavyForkedSync70Light(t *testing.T) { testHeavyForkedSync(t, 70, LightSync) }
   575  
   576  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   577  	t.Parallel()
   578  
   579  	tester := newTester()
   580  	defer tester.terminate()
   581  
   582  	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
   583  	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
   584  	tester.newPeer("light", protocol, chainA)
   585  	tester.newPeer("heavy", protocol, chainB)
   586  
   587  	// Synchronise with the peer and make sure all blocks were retrieved
   588  	if err := tester.sync("light", nil, mode); err != nil {
   589  		t.Fatalf("failed to synchronise blocks: %v", err)
   590  	}
   591  	assertOwnChain(t, tester, chainA.len())
   592  
   593  	// Synchronise with the second peer and make sure that fork is pulled too
   594  	if err := tester.sync("heavy", nil, mode); err != nil {
   595  		t.Fatalf("failed to synchronise blocks: %v", err)
   596  	}
   597  	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
   598  }
   599  
   600  // Tests that chain forks are contained within a certain interval of the current
   601  // chain head, ensuring that malicious peers cannot waste resources by feeding
   602  // long dead chains.
   603  //func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   604  //func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   605  //func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   606  func TestBoundedForkedSync70Full(t *testing.T)  { testBoundedForkedSync(t, 70, FullSync) }
   607  func TestBoundedForkedSync70Fast(t *testing.T)  { testBoundedForkedSync(t, 70, FastSync) }
   608  func TestBoundedForkedSync70Light(t *testing.T) { testBoundedForkedSync(t, 70, LightSync) }
   609  
   610  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   611  	t.Parallel()
   612  
   613  	tester := newTester()
   614  	defer tester.terminate()
   615  
   616  	chainA := testChainForkLightA
   617  	chainB := testChainForkLightB
   618  	tester.newPeer("original", protocol, chainA)
   619  	tester.newPeer("rewriter", protocol, chainB)
   620  
   621  	// Synchronise with the peer and make sure all blocks were retrieved
   622  	if err := tester.sync("original", nil, mode); err != nil {
   623  		t.Fatalf("failed to synchronise blocks: %v", err)
   624  	}
   625  	assertOwnChain(t, tester, chainA.len())
   626  
   627  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   628  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   629  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   630  	}
   631  }
   632  
   633  // Tests that chain forks are contained within a certain interval of the current
   634  // chain head for short but heavy forks too. These are a bit special because they
   635  // take different ancestor lookup paths.
   636  //func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   637  //func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   638  //func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   639  func TestBoundedHeavyForkedSync70Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 70, FullSync) }
   640  func TestBoundedHeavyForkedSync70Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 70, FastSync) }
   641  func TestBoundedHeavyForkedSync70Light(t *testing.T) { testBoundedHeavyForkedSync(t, 70, LightSync) }
   642  
   643  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   644  	t.Parallel()
   645  
   646  	tester := newTester()
   647  	defer tester.terminate()
   648  
   649  	// Create a long enough forked chain
   650  	chainA := testChainForkLightA
   651  	chainB := testChainForkHeavy
   652  	tester.newPeer("original", protocol, chainA)
   653  	tester.newPeer("heavy-rewriter", protocol, chainB)
   654  
   655  	// Synchronise with the peer and make sure all blocks were retrieved
   656  	if err := tester.sync("original", nil, mode); err != nil {
   657  		t.Fatalf("failed to synchronise blocks: %v", err)
   658  	}
   659  	assertOwnChain(t, tester, chainA.len())
   660  
   661  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   662  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   663  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   664  	}
   665  }
   666  
   667  // Tests that an inactive downloader will not accept incoming block headers and
   668  // bodies.
   669  func TestInactiveDownloader62(t *testing.T) {
   670  	t.Parallel()
   671  
   672  	tester := newTester()
   673  	defer tester.terminate()
   674  
   675  	// Check that neither block headers nor bodies are accepted
   676  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   677  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   678  	}
   679  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   680  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   681  	}
   682  }
   683  
   684  // Tests that an inactive downloader will not accept incoming block headers,
   685  // bodies and receipts.
   686  func TestInactiveDownloader63(t *testing.T) {
   687  	t.Parallel()
   688  
   689  	tester := newTester()
   690  	defer tester.terminate()
   691  
   692  	// Check that neither block headers, bodies nor receipts are accepted
   693  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   694  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   695  	}
   696  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   697  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   698  	}
   699  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   700  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   701  	}
   702  }
   703  
   704  // Tests that a canceled download wipes all previously accumulated state.
   705  //func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   706  //func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   707  //func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   708  func TestCancel70Full(t *testing.T)  { testCancel(t, 70, FullSync) }
   709  func TestCancel70Fast(t *testing.T)  { testCancel(t, 70, FastSync) }
   710  func TestCancel70Light(t *testing.T) { testCancel(t, 70, LightSync) }
   711  
   712  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   713  	t.Parallel()
   714  
   715  	tester := newTester()
   716  	defer tester.terminate()
   717  
   718  	chain := testChainBase.shorten(MaxHeaderFetch)
   719  	tester.newPeer("peer", protocol, chain)
   720  
   721  	// Make sure canceling works with a pristine downloader
   722  	tester.downloader.Cancel()
   723  	if !tester.downloader.queue.Idle() {
   724  		t.Errorf("download queue not idle")
   725  	}
   726  	// Synchronise with the peer, but cancel afterwards
   727  	if err := tester.sync("peer", nil, mode); err != nil {
   728  		t.Fatalf("failed to synchronise blocks: %v", err)
   729  	}
   730  	tester.downloader.Cancel()
   731  	if !tester.downloader.queue.Idle() {
   732  		t.Errorf("download queue not idle")
   733  	}
   734  }
   735  
   736  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   737  //func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   738  //func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   739  //func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   740  func TestMultiSynchronisation70Full(t *testing.T)  { testMultiSynchronisation(t, 70, FullSync) }
   741  func TestMultiSynchronisation70Fast(t *testing.T)  { testMultiSynchronisation(t, 70, FastSync) }
   742  func TestMultiSynchronisation70Light(t *testing.T) { testMultiSynchronisation(t, 70, LightSync) }
   743  
   744  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   745  	t.Parallel()
   746  
   747  	tester := newTester()
   748  	defer tester.terminate()
   749  
   750  	// Create various peers with various parts of the chain
   751  	targetPeers := 8
   752  	chain := testChainBase.shorten(targetPeers * 100)
   753  
   754  	for i := 0; i < targetPeers; i++ {
   755  		id := fmt.Sprintf("peer #%d", i)
   756  		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
   757  	}
   758  	if err := tester.sync("peer #0", nil, mode); err != nil {
   759  		t.Fatalf("failed to synchronise blocks: %v", err)
   760  	}
   761  	assertOwnChain(t, tester, chain.len())
   762  }
   763  
   764  // Tests that synchronisations behave well in multi-version protocol environments
   765  // and do not wreak havoc on other nodes in the network.
   766  //func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
   767  //func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
   768  //func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
   769  func TestMultiProtoSynchronisation70Full(t *testing.T)  { testMultiProtoSync(t, 70, FullSync) }
   770  func TestMultiProtoSynchronisation70Fast(t *testing.T)  { testMultiProtoSync(t, 70, FastSync) }
   771  func TestMultiProtoSynchronisation70Light(t *testing.T) { testMultiProtoSync(t, 70, LightSync) }
   772  
   773  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
   774  	t.Parallel()
   775  
   776  	tester := newTester()
   777  	defer tester.terminate()
   778  
   779  	// Create a small enough block chain to download
   780  	chain := testChainBase.shorten(blockCacheItems - 15)
   781  
   782  	// Create peers of every type
   783  	//tester.newPeer("peer 62", 62, chain)
   784  	//tester.newPeer("peer 63", 63, chain)
   785  	//tester.newPeer("peer 64", 64, chain)
   786  	tester.newPeer("peer 70", 70, chain)
   787  
   788  	// Synchronise with the requested peer and make sure all blocks were retrieved
   789  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
   790  		t.Fatalf("failed to synchronise blocks: %v", err)
   791  	}
   792  	assertOwnChain(t, tester, chain.len())
   793  
   794  	// Check that no peers have been dropped off
   795  	for _, version := range []int{70} {
   796  		peer := fmt.Sprintf("peer %d", version)
   797  		if _, ok := tester.peers[peer]; !ok {
   798  			t.Errorf("%s dropped", peer)
   799  		}
   800  	}
   801  }
   802  
   803  // Tests that if a block is empty (e.g. header only), no body request should be
   804  // made, and instead the header should be assembled into a whole block by itself.
   805  //func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
   806  //func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
   807  //func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
   808  func TestEmptyShortCircuit70Full(t *testing.T)  { testEmptyShortCircuit(t, 70, FullSync) }
   809  func TestEmptyShortCircuit70Fast(t *testing.T)  { testEmptyShortCircuit(t, 70, FastSync) }
   810  func TestEmptyShortCircuit70Light(t *testing.T) { testEmptyShortCircuit(t, 70, LightSync) }
   811  
   812  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
   813  	t.Parallel()
   814  
   815  	tester := newTester()
   816  	defer tester.terminate()
   817  
   818  	// Create a block chain to download
   819  	chain := testChainBase
   820  	tester.newPeer("peer", protocol, chain)
   821  
   822  	// Instrument the downloader to signal body requests
   823  	bodiesHave, receiptsHave := int32(0), int32(0)
   824  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   825  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
   826  	}
   827  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   828  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
   829  	}
   830  	// Synchronise with the peer and make sure all blocks were retrieved
   831  	if err := tester.sync("peer", nil, mode); err != nil {
   832  		t.Fatalf("failed to synchronise blocks: %v", err)
   833  	}
   834  	assertOwnChain(t, tester, chain.len())
   835  
   836  	// Validate the number of block bodies that should have been requested
   837  	bodiesNeeded, receiptsNeeded := 0, 0
   838  	for _, block := range chain.blockm {
   839  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   840  			bodiesNeeded++
   841  		}
   842  	}
   843  	for _, receipt := range chain.receiptm {
   844  		if mode == FastSync && len(receipt) > 0 {
   845  			receiptsNeeded++
   846  		}
   847  	}
   848  	if int(bodiesHave) != bodiesNeeded {
   849  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
   850  	}
   851  	if int(receiptsHave) != receiptsNeeded {
   852  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
   853  	}
   854  }
   855  
   856  // Tests that headers are enqueued continuously, preventing malicious nodes from
   857  // stalling the downloader by feeding gapped header chains.
   858  //func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
   859  //func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
   860  //func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
   861  func TestMissingHeaderAttack70Full(t *testing.T)  { testMissingHeaderAttack(t, 70, FullSync) }
   862  func TestMissingHeaderAttack70Fast(t *testing.T)  { testMissingHeaderAttack(t, 70, FastSync) }
   863  func TestMissingHeaderAttack70Light(t *testing.T) { testMissingHeaderAttack(t, 70, LightSync) }
   864  
   865  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
   866  	t.Parallel()
   867  
   868  	tester := newTester()
   869  	defer tester.terminate()
   870  
   871  	chain := testChainBase.shorten(blockCacheItems - 15)
   872  	brokenChain := chain.shorten(chain.len())
   873  	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
   874  	tester.newPeer("attack", protocol, brokenChain)
   875  
   876  	if err := tester.sync("attack", nil, mode); err == nil {
   877  		t.Fatalf("succeeded attacker synchronisation")
   878  	}
   879  	// Synchronise with the valid peer and make sure sync succeeds
   880  	tester.newPeer("valid", protocol, chain)
   881  	if err := tester.sync("valid", nil, mode); err != nil {
   882  		t.Fatalf("failed to synchronise blocks: %v", err)
   883  	}
   884  	assertOwnChain(t, tester, chain.len())
   885  }
   886  
   887  // Tests that if requested headers are shifted (i.e. the first one is missing), the queue
   888  // detects the invalid numbering.
   889  //func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
   890  //func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
   891  //func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
   892  func TestShiftedHeaderAttack70Full(t *testing.T)  { testShiftedHeaderAttack(t, 70, FullSync) }
   893  func TestShiftedHeaderAttack70Fast(t *testing.T)  { testShiftedHeaderAttack(t, 70, FastSync) }
   894  func TestShiftedHeaderAttack70Light(t *testing.T) { testShiftedHeaderAttack(t, 70, LightSync) }
   895  
   896  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
   897  	t.Parallel()
   898  
   899  	tester := newTester()
   900  	defer tester.terminate()
   901  
   902  	chain := testChainBase.shorten(blockCacheItems - 15)
   903  
   904  	// Attempt a full sync with an attacker feeding shifted headers
   905  	brokenChain := chain.shorten(chain.len())
   906  	delete(brokenChain.headerm, brokenChain.chain[1])
   907  	delete(brokenChain.blockm, brokenChain.chain[1])
   908  	delete(brokenChain.receiptm, brokenChain.chain[1])
   909  	tester.newPeer("attack", protocol, brokenChain)
   910  	if err := tester.sync("attack", nil, mode); err == nil {
   911  		t.Fatalf("succeeded attacker synchronisation")
   912  	}
   913  
   914  	// Synchronise with the valid peer and make sure sync succeeds
   915  	tester.newPeer("valid", protocol, chain)
   916  	if err := tester.sync("valid", nil, mode); err != nil {
   917  		t.Fatalf("failed to synchronise blocks: %v", err)
   918  	}
   919  	assertOwnChain(t, tester, chain.len())
   920  }
   921  
   922  // Tests that upon detecting an invalid header, the recent ones are rolled back
   923  // for various failure scenarios. Afterwards a full sync is attempted to make
   924  // sure no state was corrupted.
   925  //func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
   926  func TestInvalidHeaderRollback70Fast(t *testing.T)  { testInvalidHeaderRollback(t, 70, FastSync) }
   927  func TestInvalidHeaderRollback70Light(t *testing.T) { testInvalidHeaderRollback(t, 70, LightSync) }
   928  
   929  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
   930  	t.Parallel()
   931  
   932  	tester := newTester()
   933  	defer tester.terminate()
   934  
   935  	// Create a small enough block chain to download
   936  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
   937  	chain := testChainBase.shorten(targetBlocks)
   938  
   939  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
   940  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
   941  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
   942  	fastAttackChain := chain.shorten(chain.len())
   943  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
   944  	tester.newPeer("fast-attack", protocol, fastAttackChain)
   945  
   946  	if err := tester.sync("fast-attack", nil, mode); err == nil {
   947  		t.Fatalf("succeeded fast attacker synchronisation")
   948  	}
   949  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
   950  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
   951  	}
   952  
   953  	// Attempt to sync with an attacker that feeds junk during the block import phase.
   954  // This should result in the last fsHeaderSafetyNet headers being rolled back,
   955  // and also the pivot point being reverted to a non-block status.
   956  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
   957  	blockAttackChain := chain.shorten(chain.len())
   958  	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
   959  	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
   960  	tester.newPeer("block-attack", protocol, blockAttackChain)
   961  
   962  	if err := tester.sync("block-attack", nil, mode); err == nil {
   963  		t.Fatalf("succeeded block attacker synchronisation")
   964  	}
   965  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
   966  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
   967  	}
   968  	if mode == FastSync {
   969  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
   970  			t.Errorf("fast sync pivot block #%d not rolled back", head)
   971  		}
   972  	}
   973  
   974  	// Attempt to sync with an attacker that withholds promised blocks after the
   975  // fast sync pivot point. This could be an attempt to leave the node with a bad
   976  	// but already imported pivot block.
   977  	withholdAttackChain := chain.shorten(chain.len())
   978  	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
   979  	tester.downloader.syncInitHook = func(uint64, uint64) {
   980  		for i := missing; i < withholdAttackChain.len(); i++ {
   981  			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
   982  		}
   983  		tester.downloader.syncInitHook = nil
   984  	}
   985  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
   986  		t.Fatalf("succeeded withholding attacker synchronisation")
   987  	}
   988  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
   989  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
   990  	}
   991  	if mode == FastSync {
   992  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
   993  			t.Errorf("fast sync pivot block #%d not rolled back", head)
   994  		}
   995  	}
   996  
   997  	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
   998  	// should also disable fast syncing for this process, verify that we did a fresh full
   999  	// sync. Note, we can't assert anything about the receipts since we won't purge the
  1000  	// database of them, hence we can't use assertOwnChain.
  1001  	tester.newPeer("valid", protocol, chain)
  1002  	if err := tester.sync("valid", nil, mode); err != nil {
  1003  		t.Fatalf("failed to synchronise blocks: %v", err)
  1004  	}
  1005  	if hs := len(tester.ownHeaders); hs != chain.len() {
  1006  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
  1007  	}
  1008  	if mode != LightSync {
  1009  		if bs := len(tester.ownBlocks); bs != chain.len() {
  1010  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
  1011  		}
  1012  	}
  1013  }
  1014  
  1015  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1016  // afterwards by not sending any useful hashes.
  1017  //func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1018  //func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1019  //func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1020  func TestHighTDStarvationAttack70Full(t *testing.T)  { testHighTDStarvationAttack(t, 70, FullSync) }
  1021  func TestHighTDStarvationAttack70Fast(t *testing.T)  { testHighTDStarvationAttack(t, 70, FastSync) }
  1022  func TestHighTDStarvationAttack70Light(t *testing.T) { testHighTDStarvationAttack(t, 70, LightSync) }
  1023  
  1024  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1025  	t.Parallel()
  1026  
  1027  	tester := newTester()
  1028  	defer tester.terminate()
  1029  
  1030  	chain := testChainBase.shorten(1)
  1031  	tester.newPeer("attack", protocol, chain)
  1032  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1033  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1034  	}
  1035  }
  1036  
  1037  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1038  //func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1039  //func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1040  func TestBlockHeaderAttackerDropping70(t *testing.T) { testBlockHeaderAttackerDropping(t, 70) }
  1041  
  1042  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1043  	t.Parallel()
  1044  
  1045  	// Define the disconnection requirement for individual hash fetch errors
  1046  	tests := []struct {
  1047  		result error
  1048  		drop   bool
  1049  	}{
  1050  		{nil, false},                        // Sync succeeded, all is well
  1051  		{errBusy, false},                    // Sync is already in progress, no problem
  1052  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1053  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1054  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1055  		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
  1056  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1057  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1058  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1059  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1060  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1061  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1062  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1063  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1064  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1065  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1066  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1067  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1068  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1069  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1070  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1071  	}
  1072  	// Run the tests and check disconnection status
  1073  	tester := newTester()
  1074  	defer tester.terminate()
  1075  	chain := testChainBase.shorten(1)
  1076  
  1077  	for i, tt := range tests {
  1078  		// Register a new peer and ensure its presence
  1079  		id := fmt.Sprintf("test %d", i)
  1080  		if err := tester.newPeer(id, protocol, chain); err != nil {
  1081  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1082  		}
  1083  		if _, ok := tester.peers[id]; !ok {
  1084  			t.Fatalf("test %d: registered peer not found", i)
  1085  		}
  1086  		// Simulate a synchronisation and check the required result
  1087  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1088  
  1089  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1090  		if _, ok := tester.peers[id]; !ok != tt.drop {
  1091  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1092  		}
  1093  	}
  1094  }
  1095  
  1096  // Tests that synchronisation progress (origin block number, current block number
  1097  // and highest block number) is tracked and updated correctly.
  1098  //func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1099  //func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1100  //func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1101  func TestSyncProgress70Full(t *testing.T)  { testSyncProgress(t, 70, FullSync) }
  1102  func TestSyncProgress70Fast(t *testing.T)  { testSyncProgress(t, 70, FastSync) }
  1103  func TestSyncProgress70Light(t *testing.T) { testSyncProgress(t, 70, LightSync) }
  1104  
  1105  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1106  	t.Parallel()
  1107  
  1108  	tester := newTester()
  1109  	defer tester.terminate()
  1110  	chain := testChainBase.shorten(blockCacheItems - 15)
  1111  
  1112  	// Set a sync init hook to catch progress changes
  1113  	starting := make(chan struct{})
  1114  	progress := make(chan struct{})
  1115  
  1116  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1117  		starting <- struct{}{}
  1118  		<-progress
  1119  	}
  1120  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1121  
  1122  	// Synchronise half the blocks and check initial progress
  1123  	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
  1124  	pending := new(sync.WaitGroup)
  1125  	pending.Add(1)
  1126  
  1127  	go func() {
  1128  		defer pending.Done()
  1129  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1130  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1131  		}
  1132  	}()
  1133  	<-starting
  1134  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1135  		HighestBlock: uint64(chain.len()/2 - 1),
  1136  	})
  1137  	progress <- struct{}{}
  1138  	pending.Wait()
  1139  
  1140  	// Synchronise all the blocks and check continuation progress
  1141  	tester.newPeer("peer-full", protocol, chain)
  1142  	pending.Add(1)
  1143  	go func() {
  1144  		defer pending.Done()
  1145  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1146  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1147  		}
  1148  	}()
  1149  	<-starting
  1150  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1151  		StartingBlock: uint64(chain.len()/2 - 1),
  1152  		CurrentBlock:  uint64(chain.len()/2 - 1),
  1153  		HighestBlock:  uint64(chain.len() - 1),
  1154  	})
  1155  
  1156  	// Check final progress after successful sync
  1157  	progress <- struct{}{}
  1158  	pending.Wait()
  1159  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1160  		StartingBlock: uint64(chain.len()/2 - 1),
  1161  		CurrentBlock:  uint64(chain.len() - 1),
  1162  		HighestBlock:  uint64(chain.len() - 1),
  1163  	})
  1164  }
  1165  
  1166  func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
  1167  	// Mark this method as a helper so errors are reported at the call site, not in here
  1168  	t.Helper()
  1169  
  1170  	p := d.Progress()
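        	// The state sync counters vary from run to run and are not asserted by these
        	// tests, so strip them from both sides before comparing.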
  1171  	p.KnownStates, p.PulledStates = 0, 0
  1172  	want.KnownStates, want.PulledStates = 0, 0
  1173  	if p != want {
  1174  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1175  	}
  1176  }
  1177  
  1178  // Tests that synchronisation progress (origin block number and highest block
  1179  // number) is tracked and updated correctly in case of a fork (or manual head
  1180  // reversal).
  1181  //func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1182  //func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1183  //func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1184  func TestForkedSyncProgress70Full(t *testing.T)  { testForkedSyncProgress(t, 70, FullSync) }
  1185  func TestForkedSyncProgress70Fast(t *testing.T)  { testForkedSyncProgress(t, 70, FastSync) }
  1186  func TestForkedSyncProgress70Light(t *testing.T) { testForkedSyncProgress(t, 70, LightSync) }
  1187  
  1188  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1189  	t.Parallel()
  1190  
  1191  	tester := newTester()
  1192  	defer tester.terminate()
  1193  	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
  1194  	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)
  1195  
  1196  	// Set a sync init hook to catch progress changes
  1197  	starting := make(chan struct{})
  1198  	progress := make(chan struct{})
  1199  
  1200  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1201  		starting <- struct{}{}
  1202  		<-progress
  1203  	}
  1204  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1205  
  1206  	// Synchronise with one of the forks and check progress
  1207  	tester.newPeer("fork A", protocol, chainA)
  1208  	pending := new(sync.WaitGroup)
  1209  	pending.Add(1)
  1210  	go func() {
  1211  		defer pending.Done()
  1212  		if err := tester.sync("fork A", nil, mode); err != nil {
  1213  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1214  		}
  1215  	}()
  1216  	<-starting
  1217  
  1218  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1219  		HighestBlock: uint64(chainA.len() - 1),
  1220  	})
  1221  	progress <- struct{}{}
  1222  	pending.Wait()
  1223  
  1224  	// Simulate a successful sync above the fork
  1225  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1226  
  1227  	// Synchronise with the second fork and check progress resets
  1228  	tester.newPeer("fork B", protocol, chainB)
  1229  	pending.Add(1)
  1230  	go func() {
  1231  		defer pending.Done()
  1232  		if err := tester.sync("fork B", nil, mode); err != nil {
  1233  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1234  		}
  1235  	}()
  1236  	<-starting
  1237  	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
  1238  		StartingBlock: uint64(testChainBase.len()) - 1,
  1239  		CurrentBlock:  uint64(chainA.len() - 1),
  1240  		HighestBlock:  uint64(chainB.len() - 1),
  1241  	})
  1242  
  1243  	// Check final progress after successful sync
  1244  	progress <- struct{}{}
  1245  	pending.Wait()
  1246  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1247  		StartingBlock: uint64(testChainBase.len()) - 1,
  1248  		CurrentBlock:  uint64(chainB.len() - 1),
  1249  		HighestBlock:  uint64(chainB.len() - 1),
  1250  	})
  1251  }
  1252  
  1253  // Tests that if synchronisation is aborted due to some failure, then the progress
  1254  // origin is not updated in the next sync cycle, as it should be considered the
  1255  // continuation of the previous sync and not a new instance.
  1256  //func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1257  //func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1258  //func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1259  func TestFailedSyncProgress70Full(t *testing.T)  { testFailedSyncProgress(t, 70, FullSync) }
  1260  func TestFailedSyncProgress70Fast(t *testing.T)  { testFailedSyncProgress(t, 70, FastSync) }
  1261  func TestFailedSyncProgress70Light(t *testing.T) { testFailedSyncProgress(t, 70, LightSync) }
  1262  
  1263  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1264  	t.Parallel()
  1265  
  1266  	tester := newTester()
  1267  	defer tester.terminate()
  1268  	chain := testChainBase.shorten(blockCacheItems - 15)
  1269  
  1270  	// Set a sync init hook to catch progress changes
  1271  	starting := make(chan struct{})
  1272  	progress := make(chan struct{})
  1273  
  1274  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1275  		starting <- struct{}{}
  1276  		<-progress
  1277  	}
  1278  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1279  
  1280  	// Attempt a full sync with a faulty peer
  1281  	brokenChain := chain.shorten(chain.len())
  1282  	missing := brokenChain.len() / 2
  1283  	delete(brokenChain.headerm, brokenChain.chain[missing])
  1284  	delete(brokenChain.blockm, brokenChain.chain[missing])
  1285  	delete(brokenChain.receiptm, brokenChain.chain[missing])
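        	// With the middle block stripped from every data set, the peer still advertises
        	// the full chain head but the sync is guaranteed to abort halfway through.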
  1286  	tester.newPeer("faulty", protocol, brokenChain)
  1287  
  1288  	pending := new(sync.WaitGroup)
  1289  	pending.Add(1)
  1290  	go func() {
  1291  		defer pending.Done()
  1292  		if err := tester.sync("faulty", nil, mode); err == nil {
  1293  			panic("succeeded faulty synchronisation")
  1294  		}
  1295  	}()
  1296  	<-starting
  1297  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1298  		HighestBlock: uint64(brokenChain.len() - 1),
  1299  	})
  1300  	progress <- struct{}{}
  1301  	pending.Wait()
  1302  	afterFailedSync := tester.downloader.Progress()
  1303  
  1304  	// Synchronise with a good peer and check that the progress origin remains the same
  1305  	// after a failure
  1306  	tester.newPeer("valid", protocol, chain)
  1307  	pending.Add(1)
  1308  	go func() {
  1309  		defer pending.Done()
  1310  		if err := tester.sync("valid", nil, mode); err != nil {
  1311  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1312  		}
  1313  	}()
  1314  	<-starting
  1315  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1316  
  1317  	// Check final progress after successful sync
  1318  	progress <- struct{}{}
  1319  	pending.Wait()
  1320  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1321  		CurrentBlock: uint64(chain.len() - 1),
  1322  		HighestBlock: uint64(chain.len() - 1),
  1323  	})
  1324  }
  1325  
  1326  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1327  // the progress height is successfully reduced at the next sync invocation.
  1328  //func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1329  //func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1330  //func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1331  func TestFakedSyncProgress70Full(t *testing.T)  { testFakedSyncProgress(t, 70, FullSync) }
  1332  func TestFakedSyncProgress70Fast(t *testing.T)  { testFakedSyncProgress(t, 70, FastSync) }
  1333  func TestFakedSyncProgress70Light(t *testing.T) { testFakedSyncProgress(t, 70, LightSync) }
  1334  
  1335  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1336  	t.Parallel()
  1337  
  1338  	tester := newTester()
  1339  	defer tester.terminate()
  1340  	chain := testChainBase.shorten(blockCacheItems - 15)
  1341  
  1342  	// Set a sync init hook to catch progress changes
  1343  	starting := make(chan struct{})
  1344  	progress := make(chan struct{})
  1345  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1346  		starting <- struct{}{}
  1347  		<-progress
  1348  	}
  1349  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1350  
  1351  	// Create and sync with an attacker that promises a higher chain than available.
  1352  	brokenChain := chain.shorten(chain.len())
  1353  	numMissing := 5
  1354  	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
  1355  		delete(brokenChain.headerm, brokenChain.chain[i])
  1356  	}
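        	// The headers just below the advertised head are now gone, so the attacker
        	// promises a height it can never actually deliver.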
  1357  	tester.newPeer("attack", protocol, brokenChain)
  1358  
  1359  	pending := new(sync.WaitGroup)
  1360  	pending.Add(1)
  1361  	go func() {
  1362  		defer pending.Done()
  1363  		if err := tester.sync("attack", nil, mode); err == nil {
  1364  			panic("succeeded attacker synchronisation")
  1365  		}
  1366  	}()
  1367  	<-starting
  1368  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1369  		HighestBlock: uint64(brokenChain.len() - 1),
  1370  	})
  1371  	progress <- struct{}{}
  1372  	pending.Wait()
  1373  	afterFailedSync := tester.downloader.Progress()
  1374  
  1375  	// Synchronise with a good peer and check that the progress height has been reduced to
  1376  	// the true value.
  1377  	validChain := chain.shorten(chain.len() - numMissing)
  1378  	tester.newPeer("valid", protocol, validChain)
  1379  	pending.Add(1)
  1380  
  1381  	go func() {
  1382  		defer pending.Done()
  1383  		if err := tester.sync("valid", nil, mode); err != nil {
  1384  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1385  		}
  1386  	}()
  1387  	<-starting
  1388  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1389  		CurrentBlock: afterFailedSync.CurrentBlock,
  1390  		HighestBlock: uint64(validChain.len() - 1),
  1391  	})
  1392  
  1393  	// Check final progress after successful sync.
  1394  	progress <- struct{}{}
  1395  	pending.Wait()
  1396  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1397  		CurrentBlock: uint64(validChain.len() - 1),
  1398  		HighestBlock: uint64(validChain.len() - 1),
  1399  	})
  1400  }
  1401  
  1402  // This test reproduces an issue where unexpected deliveries would
  1403  // block indefinitely if they arrived at the right time.
  1404  func TestDeliverHeadersHang(t *testing.T) {
  1405  	t.Parallel()
  1406  
  1407  	testCases := []struct {
  1408  		protocol int
  1409  		syncMode SyncMode
  1410  	}{
  1411  		//{62, FullSync},
  1412  		//{63, FullSync},
  1413  		//{63, FastSync},
  1414  		{70, FullSync},
  1415  		{70, FastSync},
  1416  		{70, LightSync},
  1417  	}
  1418  	for _, tc := range testCases {
  1419  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1420  			t.Parallel()
  1421  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1422  		})
  1423  	}
  1424  }
  1425  
  1426  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1427  	master := newTester()
  1428  	defer master.terminate()
  1429  	chain := testChainBase.shorten(15)
  1430  
  1431  	for i := 0; i < 200; i++ {
  1432  		tester := newTester()
  1433  		tester.peerDb = master.peerDb
  1434  		tester.newPeer("peer", protocol, chain)
  1435  
  1436  		// Whenever the downloader requests headers, flood it with
  1437  		// a lot of unrequested header deliveries.
  1438  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1439  			peer:   tester.downloader.peers.peers["peer"].peer,
  1440  			tester: tester,
  1441  		}
  1442  		if err := tester.sync("peer", nil, mode); err != nil {
  1443  			t.Errorf("test %d: sync failed: %v", i, err)
  1444  		}
  1445  		tester.terminate()
  1446  	}
  1447  }
  1448  
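        // floodingTestPeer wraps a regular test peer, delegating every request to it
        // except RequestHeadersByNumber, which first floods the downloader with a burst
        // of unrequested header deliveries from fake peers.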
  1449  type floodingTestPeer struct {
  1450  	peer   Peer
  1451  	tester *downloadTester
  1452  }
  1453  
  1454  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1455  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1456  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1457  }
  1458  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1459  	return ftp.peer.RequestBodies(hashes)
  1460  }
  1461  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1462  	return ftp.peer.RequestReceipts(hashes)
  1463  }
  1464  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1465  	return ftp.peer.RequestNodeData(hashes)
  1466  }
  1467  
  1468  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1469  	deliveriesDone := make(chan struct{}, 500)
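        	// Fire off 499 bogus deliveries from distinct fake peers; the final buffer slot
        	// is reserved for the genuine request launched further down. None of these
        	// deliveries may block.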
  1470  	for i := 0; i < cap(deliveriesDone)-1; i++ {
  1471  		peer := fmt.Sprintf("fake-peer%d", i)
  1472  		go func() {
  1473  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1474  			deliveriesDone <- struct{}{}
  1475  		}()
  1476  	}
  1477  
  1478  	// None of the extra deliveries should block.
  1479  	timeout := time.After(60 * time.Second)
  1480  	launched := false
  1481  	for i := 0; i < cap(deliveriesDone); i++ {
  1482  		select {
  1483  		case <-deliveriesDone:
  1484  			if !launched {
  1485  				// Start delivering the requested headers
  1486  				// after one of the flooding responses has arrived.
  1487  				go func() {
  1488  					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1489  					deliveriesDone <- struct{}{}
  1490  				}()
  1491  				launched = true
  1492  			}
  1493  		case <-timeout:
  1494  			panic("blocked")
  1495  		}
  1496  	}
  1497  	return nil
  1498  }
  1499  
  1500  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1501  	testCases := []struct {
  1502  		remoteHeight uint64
  1503  		localHeight  uint64
  1504  		expected     []int
  1505  	}{
  1506  		// Remote is way higher. We should ask for the remote head and go backwards
  1507  		{1500, 1000,
  1508  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1509  		},
  1510  		{15000, 13006,
  1511  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1512  		},
  1513  		// Remote is pretty close to us. We don't have to fetch as many
  1514  		{1200, 1150,
  1515  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1516  		},
  1517  		// Remote is equal to us (so on a fork with higher td)
  1518  		// We should get the closest couple of ancestors
  1519  		{1500, 1500,
  1520  			[]int{1497, 1499},
  1521  		},
  1522  		// We're higher than the remote! Odd
  1523  		{1000, 1500,
  1524  			[]int{997, 999},
  1525  		},
  1526  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1527  		{0, 1500,
  1528  			[]int{0, 2},
  1529  		},
  1530  		{6000000, 0,
  1531  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1532  		},
  1533  		{0, 0,
  1534  			[]int{0, 2},
  1535  		},
  1536  	}
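        	// reqs expands a (from, count, span) request description into the concrete
        	// block numbers that would be fetched: from, from+span+1, from+2*(span+1), and
        	// so on. For example, reqs(10, 3, 4) yields [10 15 20].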
  1537  	reqs := func(from, count, span int) []int {
  1538  		var r []int
  1539  		num := from
  1540  		for len(r) < count {
  1541  			r = append(r, num)
  1542  			num += span + 1
  1543  		}
  1544  		return r
  1545  	}
  1546  	for i, tt := range testCases {
  1547  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1548  		data := reqs(int(from), count, span)
  1549  
  1550  		if max != uint64(data[len(data)-1]) {
  1551  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1552  		}
  1553  		failed := false
  1554  		if len(data) != len(tt.expected) {
  1555  			failed = true
  1556  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1557  		} else {
  1558  			for j, n := range data {
  1559  				if n != tt.expected[j] {
  1560  					failed = true
  1561  					break
  1562  				}
  1563  			}
  1564  		}
  1565  		if failed {
  1566  			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
  1567  			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
  1568  			fmt.Printf("got: %v\n", res)
  1569  			fmt.Printf("exp: %v\n", exp)
  1570  			t.Errorf("test %d: wrong values", i)
  1571  		}
  1572  	}
  1573  }
  1574  
  1575  // Tests that peers below a pre-configured checkpoint block are prevented from
  1576  // being fast-synced from, avoiding potential cheap eclipse attacks.
  1577  //func TestCheckpointEnforcement62(t *testing.T)      { testCheckpointEnforcement(t, 62, FullSync) }
  1578  //func TestCheckpointEnforcement63Full(t *testing.T)  { testCheckpointEnforcement(t, 63, FullSync) }
  1579  //func TestCheckpointEnforcement63Fast(t *testing.T)  { testCheckpointEnforcement(t, 63, FastSync) }
  1580  func TestCheckpointEnforcement70Full(t *testing.T)  { testCheckpointEnforcement(t, 70, FullSync) }
  1581  func TestCheckpointEnforcement70Fast(t *testing.T)  { testCheckpointEnforcement(t, 70, FastSync) }
  1582  func TestCheckpointEnforcement70Light(t *testing.T) { testCheckpointEnforcement(t, 70, LightSync) }
  1583  
  1584  func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
  1585  	t.Parallel()
  1586  
  1587  	// Create a new tester with a particular hard coded checkpoint block
  1588  	tester := newTester()
  1589  	defer tester.terminate()
  1590  
  1591  	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
  1592  	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
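        	// The peer's whole chain sits below the checkpoint, so fast sync must refuse to
        	// use it (errUnsyncedPeer), while full and light sync are still allowed to proceed.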
  1593  
  1594  	// Attempt to sync with the peer and validate the result
  1595  	tester.newPeer("peer", protocol, chain)
  1596  
  1597  	var expect error
  1598  	if mode == FastSync {
  1599  		expect = errUnsyncedPeer
  1600  	}
  1601  	if err := tester.sync("peer", nil, mode); err != expect {
  1602  		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
  1603  	}
  1604  	if mode == FastSync {
  1605  		assertOwnChain(t, tester, 1)
  1606  	} else {
  1607  		assertOwnChain(t, tester, chain.len())
  1608  	}
  1609  }