github.com/ebakus/go-ebakus@v1.0.5-0.20200520105415-dbccef9ec421/eth/downloader/downloader_test.go

// Copyright 2019 The ebakus/go-ebakus Authors
// This file is part of the ebakus/go-ebakus library.
//
// The ebakus/go-ebakus library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The ebakus/go-ebakus library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the ebakus/go-ebakus library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ebakus/go-ebakus"
	"github.com/ebakus/go-ebakus/common"
	"github.com/ebakus/go-ebakus/core/rawdb"
	"github.com/ebakus/go-ebakus/core/types"
	"github.com/ebakus/go-ebakus/ethdb"
	"github.com/ebakus/go-ebakus/event"
	"github.com/ebakus/go-ebakus/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	maxForkAncestry = 10000
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

		// Initialize ancient store with test genesis block
		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = rawdb.NewMemoryDatabase()
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}
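
// A minimal usage sketch of the harness above, mirroring the pattern of the
// tests later in this file (the peer id and protocol version are purely
// illustrative; t is the surrounding *testing.T):
//
//	tester := newTester()
//	defer tester.terminate()
//	chain := testChainBase.shorten(blockCacheItems - 15)
//	tester.newPeer("sketch", 64, chain)
//	if err := tester.sync("sketch", nil, FullSync); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}
//	assertOwnChain(t, tester, chain.len())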

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if the receipts for a block are present in the tester's
// canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if _, ok := dl.ancientReceipts[hash]; ok {
		return true
	}
	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	header := dl.ancientHeaders[hash]
	if header != nil {
		return header
	}
	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	block := dl.ancientBlocks[hash]
	if block != nil {
		return block
	}
	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

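	// Walk the hash chain backwards: ancient blocks are returned as-is, while
	// an active block only counts as the head if its state root is actually
	// present in stateDb.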
	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
				return i, errors.New("unknown parent")
			}
		}
		if blocks[i].NumberU64() <= ancientLimit {
			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]

			// Migrate from active db to ancient db
			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())

			delete(dl.ownHeaders, blocks[i].Hash())
			delete(dl.ownChainTd, blocks[i].Hash())
		} else {
			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
		}
	}
	return len(blocks), nil
}
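
// Note on the ancient split above: with a freezer threshold of ancientLimit,
// every imported block numbered at or below it migrates wholesale (header,
// td, body and receipts) into the ancient maps and is removed from the active
// ones, which is why assertOwnForkedChain simply sums both stores.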

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])

		delete(dl.ancientChainTd, hashes[i])
		delete(dl.ancientHeaders, hashes[i])
		delete(dl.ancientReceipts, hashes[i])
		delete(dl.ancientBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	lock          sync.RWMutex
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash retrieves a batch of headers starting at a hashed
// origin from the peer's chain and delivers them to the downloader
// asynchronously.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByHash(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber retrieves a batch of headers starting at a numbered
// origin from the peer's chain and delivers them to the downloader
// asynchronously.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByNumber(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies retrieves a batch of block bodies from the peer's chain and
// delivers them to the downloader asynchronously.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, _ := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs)
	return nil
}

// RequestReceipts retrieves a batch of block receipts from the peer's chain
// and delivers them to the downloader asynchronously.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData retrieves a batch of node state data from the peer's
// database and delivers it to the downloader asynchronously, omitting any
// entries deliberately marked as missing.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}
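
// A test can make this peer withhold specific trie nodes during fast sync by
// populating missingStates before syncing (a sketch; root stands for whatever
// state root hash the test wants withheld):
//
//	tester.peers["peer"].missingStates = map[common.Hash]bool{root: true}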

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.mode == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}
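
// Worked example of the arithmetic above: with common = 10 and
// lengths = []int{15, 18}, the expected totals are 15 + (18 - 10) = 23
// headers, blocks and receipts (before the LightSync override).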

// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks is being downloaded, it gets throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					cached = receipts
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
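		// Note: the break conditions above (and the assertion below) accept a
		// cache that is reorgProtHeaderDelay items short of blockCacheItems,
		// since the queue may withhold that many headers near the chain head
		// for reorg protection.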
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("heavy-rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Create peers of every type
	tester.newPeer("peer 62", 62, chain)
	tester.newPeer("peer 63", 63, chain)
	tester.newPeer("peer 64", 64, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block by itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}

	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}

	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be an attempt to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
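	// The hook above parks the downloader right after it determines the sync
	// origin, so the test can assert each intermediate progress snapshot
	// before releasing it via the progress channel.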
  1158  	checkProgress(t, tester.downloader, "pristine", ebakus.SyncProgress{})
  1159  
  1160  	// Synchronise half the blocks and check initial progress
  1161  	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
  1162  	pending := new(sync.WaitGroup)
  1163  	pending.Add(1)
  1164  
  1165  	go func() {
  1166  		defer pending.Done()
  1167  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1168  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1169  		}
  1170  	}()
  1171  	<-starting
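        	// Block numbers are zero-based (the genesis is block 0), so a chain of
        	// len() blocks has a head number of len()-1.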
  1172  	checkProgress(t, tester.downloader, "initial", ebakus.SyncProgress{
  1173  		HighestBlock: uint64(chain.len()/2 - 1),
  1174  	})
  1175  	progress <- struct{}{}
  1176  	pending.Wait()
  1177  
  1178  	// Synchronise all the blocks and check continuation progress
  1179  	tester.newPeer("peer-full", protocol, chain)
  1180  	pending.Add(1)
  1181  	go func() {
  1182  		defer pending.Done()
  1183  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1184  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1185  		}
  1186  	}()
  1187  	<-starting
  1188  	checkProgress(t, tester.downloader, "completing", ebakus.SyncProgress{
  1189  		StartingBlock: uint64(chain.len()/2 - 1),
  1190  		CurrentBlock:  uint64(chain.len()/2 - 1),
  1191  		HighestBlock:  uint64(chain.len() - 1),
  1192  	})
  1193  
  1194  	// Check final progress after successful sync
  1195  	progress <- struct{}{}
  1196  	pending.Wait()
  1197  	checkProgress(t, tester.downloader, "final", ebakus.SyncProgress{
  1198  		StartingBlock: uint64(chain.len()/2 - 1),
  1199  		CurrentBlock:  uint64(chain.len() - 1),
  1200  		HighestBlock:  uint64(chain.len() - 1),
  1201  	})
  1202  }
  1203  
  1204  func checkProgress(t *testing.T, d *Downloader, stage string, want ebakus.SyncProgress) {
  1205  	// Mark this function as a test helper so errors are reported at the call site, not in here
  1206  	t.Helper()
  1207  
  1208  	p := d.Progress()
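        	// The state sync counters depend on timing, so zero them on both sides
        	// and compare only the block progress fields.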
  1209  	p.KnownStates, p.PulledStates = 0, 0
  1210  	want.KnownStates, want.PulledStates = 0, 0
  1211  	if p != want {
  1212  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1213  	}
  1214  }
  1215  
  1216  // Tests that synchronisation progress (origin block number and highest block
  1217  // number) is tracked and updated correctly in case of a fork (or manual head
  1218  // reversal).
  1219  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1220  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1221  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1222  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1223  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1224  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1225  
  1226  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1227  	t.Parallel()
  1228  
  1229  	tester := newTester()
  1230  	defer tester.terminate()
  1231  	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
  1232  	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)
  1233  
  1234  	// Set a sync init hook to catch progress changes
  1235  	starting := make(chan struct{})
  1236  	progress := make(chan struct{})
  1237  
  1238  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1239  		starting <- struct{}{}
  1240  		<-progress
  1241  	}
  1242  	checkProgress(t, tester.downloader, "pristine", ebakus.SyncProgress{})
  1243  
  1244  	// Synchronise with one of the forks and check progress
  1245  	tester.newPeer("fork A", protocol, chainA)
  1246  	pending := new(sync.WaitGroup)
  1247  	pending.Add(1)
  1248  	go func() {
  1249  		defer pending.Done()
  1250  		if err := tester.sync("fork A", nil, mode); err != nil {
  1251  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1252  		}
  1253  	}()
  1254  	<-starting
  1255  
  1256  	checkProgress(t, tester.downloader, "initial", ebakus.SyncProgress{
  1257  		HighestBlock: uint64(chainA.len() - 1),
  1258  	})
  1259  	progress <- struct{}{}
  1260  	pending.Wait()
  1261  
  1262  	// Simulate a successful sync above the fork
  1263  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1264  
  1265  	// Synchronise with the second fork and check progress resets
  1266  	tester.newPeer("fork B", protocol, chainB)
  1267  	pending.Add(1)
  1268  	go func() {
  1269  		defer pending.Done()
  1270  		if err := tester.sync("fork B", nil, mode); err != nil {
  1271  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1272  		}
  1273  	}()
  1274  	<-starting
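        	// Switching forks resets the sync origin to the common ancestor, i.e.
        	// the head of the shared base chain.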
  1275  	checkProgress(t, tester.downloader, "forking", ebakus.SyncProgress{
  1276  		StartingBlock: uint64(testChainBase.len()) - 1,
  1277  		CurrentBlock:  uint64(chainA.len() - 1),
  1278  		HighestBlock:  uint64(chainB.len() - 1),
  1279  	})
  1280  
  1281  	// Check final progress after successful sync
  1282  	progress <- struct{}{}
  1283  	pending.Wait()
  1284  	checkProgress(t, tester.downloader, "final", ebakus.SyncProgress{
  1285  		StartingBlock: uint64(testChainBase.len()) - 1,
  1286  		CurrentBlock:  uint64(chainB.len() - 1),
  1287  		HighestBlock:  uint64(chainB.len() - 1),
  1288  	})
  1289  }
  1290  
  1291  // Tests that if synchronisation is aborted due to some failure, then the progress
  1292  // origin is not updated in the next sync cycle, as it should be considered the
  1293  // continuation of the previous sync and not a new instance.
  1294  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1295  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1296  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1297  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1298  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1299  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1300  
  1301  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1302  	t.Parallel()
  1303  
  1304  	tester := newTester()
  1305  	defer tester.terminate()
  1306  	chain := testChainBase.shorten(blockCacheItems - 15)
  1307  
  1308  	// Set a sync init hook to catch progress changes
  1309  	starting := make(chan struct{})
  1310  	progress := make(chan struct{})
  1311  
  1312  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1313  		starting <- struct{}{}
  1314  		<-progress
  1315  	}
  1316  	checkProgress(t, tester.downloader, "pristine", ebakus.SyncProgress{})
  1317  
  1318  	// Attempt a full sync with a faulty peer
  1319  	brokenChain := chain.shorten(chain.len())
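        	// shorten returns a copy of the chain, so the deletions below do not
        	// affect the original test chain.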
  1320  	missing := brokenChain.len() / 2
  1321  	delete(brokenChain.headerm, brokenChain.chain[missing])
  1322  	delete(brokenChain.blockm, brokenChain.chain[missing])
  1323  	delete(brokenChain.receiptm, brokenChain.chain[missing])
  1324  	tester.newPeer("faulty", protocol, brokenChain)
  1325  
  1326  	pending := new(sync.WaitGroup)
  1327  	pending.Add(1)
  1328  	go func() {
  1329  		defer pending.Done()
  1330  		if err := tester.sync("faulty", nil, mode); err == nil {
  1331  			panic("succeeded faulty synchronisation")
  1332  		}
  1333  	}()
  1334  	<-starting
  1335  	checkProgress(t, tester.downloader, "initial", ebakus.SyncProgress{
  1336  		HighestBlock: uint64(brokenChain.len() - 1),
  1337  	})
  1338  	progress <- struct{}{}
  1339  	pending.Wait()
  1340  	afterFailedSync := tester.downloader.Progress()
  1341  
  1342  	// Synchronise with a good peer and check that the progress origin remains
  1343  	// the same after a failure
  1344  	tester.newPeer("valid", protocol, chain)
  1345  	pending.Add(1)
  1346  	go func() {
  1347  		defer pending.Done()
  1348  		if err := tester.sync("valid", nil, mode); err != nil {
  1349  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1350  		}
  1351  	}()
  1352  	<-starting
  1353  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1354  
  1355  	// Check final progress after successful sync
  1356  	progress <- struct{}{}
  1357  	pending.Wait()
  1358  	checkProgress(t, tester.downloader, "final", ebakus.SyncProgress{
  1359  		CurrentBlock: uint64(chain.len() - 1),
  1360  		HighestBlock: uint64(chain.len() - 1),
  1361  	})
  1362  }
  1363  
  1364  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1365  // the progress height is successfully reduced at the next sync invocation.
  1366  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1367  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1368  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1369  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1370  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1371  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1372  
  1373  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1374  	t.Parallel()
  1375  
  1376  	tester := newTester()
  1377  	defer tester.terminate()
  1378  	chain := testChainBase.shorten(blockCacheItems - 15)
  1379  
  1380  	// Set a sync init hook to catch progress changes
  1381  	starting := make(chan struct{})
  1382  	progress := make(chan struct{})
  1383  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1384  		starting <- struct{}{}
  1385  		<-progress
  1386  	}
  1387  	checkProgress(t, tester.downloader, "pristine", ebakus.SyncProgress{})
  1388  
  1389  	// Create and sync with an attacker that promises a higher chain than available.
  1390  	brokenChain := chain.shorten(chain.len())
  1391  	numMissing := 5
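        	// Remove a few headers just below the head: the peer still advertises
        	// the full height, but cannot deliver everything it promised.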
  1392  	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
  1393  		delete(brokenChain.headerm, brokenChain.chain[i])
  1394  	}
  1395  	tester.newPeer("attack", protocol, brokenChain)
  1396  
  1397  	pending := new(sync.WaitGroup)
  1398  	pending.Add(1)
  1399  	go func() {
  1400  		defer pending.Done()
  1401  		if err := tester.sync("attack", nil, mode); err == nil {
  1402  			panic("succeeded attacker synchronisation")
  1403  		}
  1404  	}()
  1405  	<-starting
  1406  	checkProgress(t, tester.downloader, "initial", ebakus.SyncProgress{
  1407  		HighestBlock: uint64(brokenChain.len() - 1),
  1408  	})
  1409  	progress <- struct{}{}
  1410  	pending.Wait()
  1411  	afterFailedSync := tester.downloader.Progress()
  1412  
  1413  	// Synchronise with a good peer and check that the progress height has been reduced to
  1414  	// the true value.
  1415  	validChain := chain.shorten(chain.len() - numMissing)
  1416  	tester.newPeer("valid", protocol, validChain)
  1417  	pending.Add(1)
  1418  
  1419  	go func() {
  1420  		defer pending.Done()
  1421  		if err := tester.sync("valid", nil, mode); err != nil {
  1422  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1423  		}
  1424  	}()
  1425  	<-starting
  1426  	checkProgress(t, tester.downloader, "completing", ebakus.SyncProgress{
  1427  		CurrentBlock: afterFailedSync.CurrentBlock,
  1428  		HighestBlock: uint64(validChain.len() - 1),
  1429  	})
  1430  
  1431  	// Check final progress after successful sync.
  1432  	progress <- struct{}{}
  1433  	pending.Wait()
  1434  	checkProgress(t, tester.downloader, "final", ebakus.SyncProgress{
  1435  		CurrentBlock: uint64(validChain.len() - 1),
  1436  		HighestBlock: uint64(validChain.len() - 1),
  1437  	})
  1438  }
  1439  
  1440  // This test reproduces an issue where unexpected deliveries would
  1441  // block indefinitely if they arrived at the right time.
  1442  func TestDeliverHeadersHang(t *testing.T) {
  1443  	t.Parallel()
  1444  
  1445  	testCases := []struct {
  1446  		protocol int
  1447  		syncMode SyncMode
  1448  	}{
  1449  		{62, FullSync},
  1450  		{63, FullSync},
  1451  		{63, FastSync},
  1452  		{64, FullSync},
  1453  		{64, FastSync},
  1454  		{64, LightSync},
  1455  	}
  1456  	for _, tc := range testCases {
  1457  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1458  			t.Parallel()
  1459  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1460  		})
  1461  	}
  1462  }
  1463  
  1464  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1465  	master := newTester()
  1466  	defer master.terminate()
  1467  	chain := testChainBase.shorten(15)
  1468  
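        	// The hang being reproduced is timing dependent, so run the sync many
        	// times to give it a chance to trigger.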
  1469  	for i := 0; i < 200; i++ {
  1470  		tester := newTester()
  1471  		tester.peerDb = master.peerDb
  1472  		tester.newPeer("peer", protocol, chain)
  1473  
  1474  		// Whenever the downloader requests headers, flood it with
  1475  		// a lot of unrequested header deliveries.
  1476  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1477  			peer:   tester.downloader.peers.peers["peer"].peer,
  1478  			tester: tester,
  1479  		}
  1480  		if err := tester.sync("peer", nil, mode); err != nil {
  1481  			t.Errorf("test %d: sync failed: %v", i, err)
  1482  		}
  1483  		tester.terminate()
  1484  	}
  1485  }
  1486  
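        // floodingTestPeer wraps a regular test peer, flooding the downloader with
        // a horde of unrequested header deliveries whenever headers are requested.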
  1487  type floodingTestPeer struct {
  1488  	peer   Peer
  1489  	tester *downloadTester
  1490  }
  1491  
  1492  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1493  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1494  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1495  }
  1496  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1497  	return ftp.peer.RequestBodies(hashes)
  1498  }
  1499  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1500  	return ftp.peer.RequestReceipts(hashes)
  1501  }
  1502  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1503  	return ftp.peer.RequestNodeData(hashes)
  1504  }
  1505  
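        // RequestHeadersByNumber launches a swarm of unsolicited header deliveries
        // from fake peers around the real request and verifies that none of them
        // block, guarding against the hang described above.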
  1506  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1507  	deliveriesDone := make(chan struct{}, 500)
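        	// Launch 499 flooding deliveries; the real request below contributes the
        	// 500th completion signal.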
  1508  	for i := 0; i < cap(deliveriesDone)-1; i++ {
  1509  		peer := fmt.Sprintf("fake-peer%d", i)
  1510  		go func() {
  1511  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1512  			deliveriesDone <- struct{}{}
  1513  		}()
  1514  	}
  1515  
  1516  	// None of the extra deliveries should block.
  1517  	timeout := time.After(60 * time.Second)
  1518  	launched := false
  1519  	for i := 0; i < cap(deliveriesDone); i++ {
  1520  		select {
  1521  		case <-deliveriesDone:
  1522  			if !launched {
  1523  				// Start delivering the requested headers
  1524  				// after one of the flooding responses has arrived.
  1525  				go func() {
  1526  					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1527  					deliveriesDone <- struct{}{}
  1528  				}()
  1529  				launched = true
  1530  			}
  1531  		case <-timeout:
  1532  			panic("blocked")
  1533  		}
  1534  	}
  1535  	return nil
  1536  }
  1537  
  1538  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1539  	testCases := []struct {
  1540  		remoteHeight uint64
  1541  		localHeight  uint64
  1542  		expected     []int
  1543  	}{
  1544  		// Remote is way higher. We should ask for the remote head and go backwards
  1545  		{1500, 1000,
  1546  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1547  		},
  1548  		{15000, 13006,
  1549  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1550  		},
  1551  		// Remote is pretty close to us. We don't have to fetch as many
  1552  		{1200, 1150,
  1553  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1554  		},
  1555  		// Remote is equal to us (so on a fork with higher td)
  1556  		// We should get the closest couple of ancestors
  1557  		{1500, 1500,
  1558  			[]int{1497, 1499},
  1559  		},
  1560  		// We're higher than the remote! Odd
  1561  		{1000, 1500,
  1562  			[]int{997, 999},
  1563  		},
  1564  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1565  		{0, 1500,
  1566  			[]int{0, 2},
  1567  		},
  1568  		{6000000, 0,
  1569  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1570  		},
  1571  		{0, 0,
  1572  			[]int{0, 2},
  1573  		},
  1574  	}
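        	// reqs expands a (from, count, span) tuple into the concrete block
        	// numbers a spanned header request would target.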
  1575  	reqs := func(from, count, span int) []int {
  1576  		var r []int
  1577  		num := from
  1578  		for len(r) < count {
  1579  			r = append(r, num)
  1580  			num += span + 1
  1581  		}
  1582  		return r
  1583  	}
  1584  	for i, tt := range testCases {
  1585  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1586  		data := reqs(int(from), count, span)
  1587  
  1588  		if max != uint64(data[len(data)-1]) {
  1589  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1590  		}
  1591  		failed := false
  1592  		if len(data) != len(tt.expected) {
  1593  			failed = true
  1594  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1595  		} else {
  1596  			for j, n := range data {
  1597  				if n != tt.expected[j] {
  1598  					failed = true
  1599  					break
  1600  				}
  1601  			}
  1602  		}
  1603  		if failed {
  1604  			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
  1605  			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
  1606  			t.Logf("got: %v\n", res)
  1607  			t.Logf("exp: %v\n", exp)
  1608  			t.Errorf("test %d: wrong values", i)
  1609  		}
  1610  	}
  1611  }
  1612  
  1613  // Tests that peers below a pre-configured checkpoint block are prevented from
  1614  // being fast-synced from, avoiding potential cheap eclipse attacks.
  1615  func TestCheckpointEnforcement62(t *testing.T)      { testCheckpointEnforcement(t, 62, FullSync) }
  1616  func TestCheckpointEnforcement63Full(t *testing.T)  { testCheckpointEnforcement(t, 63, FullSync) }
  1617  func TestCheckpointEnforcement63Fast(t *testing.T)  { testCheckpointEnforcement(t, 63, FastSync) }
  1618  func TestCheckpointEnforcement64Full(t *testing.T)  { testCheckpointEnforcement(t, 64, FullSync) }
  1619  func TestCheckpointEnforcement64Fast(t *testing.T)  { testCheckpointEnforcement(t, 64, FastSync) }
  1620  func TestCheckpointEnforcement64Light(t *testing.T) { testCheckpointEnforcement(t, 64, LightSync) }
  1621  
  1622  func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
  1623  	t.Parallel()
  1624  
  1625  	// Create a new tester with a particular hard coded checkpoint block
  1626  	tester := newTester()
  1627  	defer tester.terminate()
  1628  
  1629  	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
  1630  	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
  1631  
  1632  	// Attempt to sync with the peer and validate the result
  1633  	tester.newPeer("peer", protocol, chain)
  1634  
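        	// Fast and light sync must reject a peer whose head is below the
        	// checkpoint; full sync is allowed to proceed.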
  1635  	var expect error
  1636  	if mode == FastSync || mode == LightSync {
  1637  		expect = errUnsyncedPeer
  1638  	}
  1639  	if err := tester.sync("peer", nil, mode); err != expect {
  1640  		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
  1641  	}
  1642  	if mode == FastSync || mode == LightSync {
  1643  		assertOwnChain(t, tester, 1)
  1644  	} else {
  1645  		assertOwnChain(t, tester, chain.len())
  1646  	}
  1647  }