github.com/halybang/go-ethereum@v1.0.5-0.20180325041310-3b262bc1367c/eth/downloader/downloader_test.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/wanchain/go-wanchain/common"
    29  	"github.com/wanchain/go-wanchain/consensus/ethash"
    30  	"github.com/wanchain/go-wanchain/core"
    31  	"github.com/wanchain/go-wanchain/core/state"
    32  	"github.com/wanchain/go-wanchain/core/types"
    33  	"github.com/wanchain/go-wanchain/core/vm"
    34  	"github.com/wanchain/go-wanchain/crypto"
    35  	"github.com/wanchain/go-wanchain/ethdb"
    36  	"github.com/wanchain/go-wanchain/event"
    37  	"github.com/wanchain/go-wanchain/params"
    38  	"github.com/wanchain/go-wanchain/trie"
    39  )
    40  
    41  var (
    42  	testKey, _     = crypto.HexToECDSA("f1572f76b75b40a7da72d6f2ee7fda3d1189c2d28f0a2f096347055abe344d7f")
    43  	coinbaseKey, _ = crypto.HexToECDSA("900d0981bde924f82b7e8ccec52e2b07c2b0835cc22143d87f7dae2b733b3e57")
    44  	testAddress    = crypto.PubkeyToAddress(testKey.PublicKey)
    45  	coinbase       = crypto.PubkeyToAddress(coinbaseKey.PublicKey)
    46  )
    47  
    48  // Reduce some of the parameters to make the tester faster.
    49  func init() {
    50  	MaxForkAncestry = uint64(10000) // shorter fork ancestry lookback
    51  	blockCacheLimit = 1024          // smaller download cache so throttling triggers sooner
    52  	fsCriticalTrials = 10           // fewer critical-section retries before fast sync bails out
    53  }
    54  
    55  // downloadTester is a test simulator for mocking out a local block chain.
    56  type downloadTester struct {
    57  	downloader *Downloader
    58  
    59  	genesis *types.Block   // Genesis block used by the tester and peers
    60  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    61  	peerDb  ethdb.Database // Database of the peers containing all data
    62  
    63  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    64  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    65  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    66  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    67  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    68  
    69  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    70  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    71  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    72  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    73  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    74  
    75  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    76  
    77  	lock sync.RWMutex
    78  }
    79  
    80  // newTester creates a new downloader test mocker.
    81  func newTester() *downloadTester {
    82  	testdb, _ := ethdb.NewMemDatabase()
    83  	gspec := core.DefaultPPOWTestingGenesisBlock()
    84  	genesis := gspec.MustCommit(testdb)
    85  
    86  	tester := &downloadTester{
    87  		genesis:           genesis,
    88  		peerDb:            testdb,
    89  		ownHashes:         []common.Hash{genesis.Hash()},
    90  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    91  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    92  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    93  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    94  		peerHashes:        make(map[string][]common.Hash),
    95  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    96  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    97  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    98  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    99  		peerMissingStates: make(map[string]map[common.Hash]bool),
   100  	}
   101  
   102  	tester.stateDb, _ = ethdb.NewMemDatabase()
   103  	gspec.MustCommit(tester.stateDb)
   104  
   105  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   106  
   107  	return tester
   108  }
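
// A minimal usage sketch (illustrative, not part of the original suite): the
// helpers below compose into the typical lifecycle of create tester -> build
// chain -> register peer -> sync -> assert.
//
//	func exampleLifecycle(t *testing.T) {
//		tester := newTester()
//		defer tester.terminate()
//
//		hashes, headers, blocks, receipts := tester.makeChain(10, testAddress, tester.genesis, nil, false)
//		tester.newPeer("example", 64, hashes, headers, blocks, receipts)
//
//		if err := tester.sync("example", nil, FullSync); err != nil {
//			t.Fatalf("failed to synchronise blocks: %v", err)
//		}
//		assertOwnChain(t, tester, 10+1) // the chain length counts the genesis block
//	}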
   109  
   110  // makeChain creates a chain of n blocks starting at and including parent.
   111  // The returned hash chain is ordered head->parent. In addition, when extending
   112  // the genesis, every 3rd block contains a bonus transaction to allow testing
   113  // correct block reassembly.
   114  func (dl *downloadTester) makeChain(n int, seed common.Address, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   115  
   116  	// Generate the block chain
   117  	gspec := core.DefaultPPOWTestingGenesisBlock()
   118  	engine := ethash.NewFaker(dl.peerDb)
   119  
   120  	chain, _ := core.NewBlockChain(dl.peerDb, params.TestChainConfig, engine, vm.Config{})
   121  	defer chain.Stop()
   122  	env := core.NewChainEnv(params.TestChainConfig, gspec, engine, chain, dl.peerDb)
   123  
   124  	blocks, receipts := env.GenerateChain(parent, n, func(i int, block *core.BlockGen) {
   125  		block.SetCoinbase(seed)
   126  
   127  		// If a heavy chain is requested, delay blocks to raise difficulty
   128  		if heavy {
   129  			block.OffsetTime(-1)
   130  		}
   131  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   132  		if parent == dl.genesis && i%3 == 0 {
   133  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   134  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), seed, big.NewInt(1000), new(big.Int).SetUint64(params.TxGas), nil, nil), signer, testKey)
   135  			if err != nil {
   136  				panic(err)
   137  			}
   138  			block.AddTx(tx)
   139  		}
   140  	})
   141  
   142  	// Convert the block-chain into a hash-chain and header/block maps
   143  	hashes := make([]common.Hash, n+1)
   144  	hashes[len(hashes)-1] = parent.Hash()
   145  
   146  	headerm := make(map[common.Hash]*types.Header, n+1)
   147  	headerm[parent.Hash()] = parent.Header()
   148  
   149  	blockm := make(map[common.Hash]*types.Block, n+1)
   150  	blockm[parent.Hash()] = parent
   151  
   152  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   153  	receiptm[parent.Hash()] = parentReceipts
   154  
   155  	for i, b := range blocks {
   156  		hashes[len(hashes)-i-2] = b.Hash()
   157  		headerm[b.Hash()] = b.Header()
   158  		blockm[b.Hash()] = b
   159  		receiptm[b.Hash()] = receipts[i]
   160  	}
   161  	return hashes, headerm, blockm, receiptm
   162  }
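
// A sketch of deriving a fork pair from makeChain (an assumption mirroring the
// upstream makeChainFork helper, which this file does not include): build a
// common prefix, then extend its head with two branches seeded differently so
// the blocks diverge. Each branch already ends in the prefix head, so the
// slices are glued by dropping the duplicated head entry.
//
//	func exampleFork(dl *downloadTester, n, f int) ([]common.Hash, []common.Hash) {
//		prefix, _, blocks, receipts := dl.makeChain(n-f, testAddress, dl.genesis, nil, false)
//		head, headReceipts := blocks[prefix[0]], receipts[prefix[0]]
//
//		branchA, _, _, _ := dl.makeChain(f, testAddress, head, headReceipts, false)
//		branchB, _, _, _ := dl.makeChain(f, coinbase, head, headReceipts, true)
//
//		forkA := append(append([]common.Hash{}, branchA[:f]...), prefix...)
//		forkB := append(append([]common.Hash{}, branchB[:f]...), prefix...)
//		return forkA, forkB
//	}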
   163  
   164  // terminate aborts any operations on the embedded downloader and releases all
   165  // held resources.
   166  func (dl *downloadTester) terminate() {
   167  	dl.downloader.Terminate()
   168  }
   169  
   170  // sync starts synchronizing with a remote peer, blocking until it completes.
   171  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   172  	dl.lock.RLock()
   173  	hash := dl.peerHashes[id][0]
   174  	// If no particular TD was requested, load from the peer's blockchain
   175  	if td == nil {
   176  		td = big.NewInt(1)
   177  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   178  			td = diff
   179  		}
   180  	}
   181  	dl.lock.RUnlock()
   182  
   183  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   184  	err := dl.downloader.synchronise(id, hash, td, mode)
   185  	select {
   186  	case <-dl.downloader.cancelCh:
   187  		// Ok, downloader fully cancelled after sync cycle
   188  	default:
   189  		// Downloader is still accepting packets, can block a peer up
   190  		panic("downloader active post sync cycle") // panic will be caught by tester
   191  	}
   192  	return err
   193  }
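
// Passing a non-nil td to sync overrides the peer's recorded difficulty. An
// inflated value makes the downloader expect more chain than the peer can
// deliver, which is how testHighTDStarvationAttack below provokes
// errStallingPeer:
//
//	err := tester.sync("attack", big.NewInt(1000000), mode)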
   194  
   195  // HasHeader checks if a header is present in the tester's canonical chain.
   196  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   197  	return dl.GetHeaderByHash(hash) != nil
   198  }
   199  
   200  // HasBlockAndState checks if a block and its associated state are present in the tester's canonical chain.
   201  func (dl *downloadTester) HasBlockAndState(hash common.Hash) bool {
   202  	block := dl.GetBlockByHash(hash)
   203  	if block == nil {
   204  		return false
   205  	}
   206  	_, err := dl.stateDb.Get(block.Root().Bytes())
   207  	return err == nil
   208  }
   209  
   210  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   211  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   212  	dl.lock.RLock()
   213  	defer dl.lock.RUnlock()
   214  
   215  	return dl.ownHeaders[hash]
   216  }
   217  
   218  // GetBlockByHash retrieves a block from the tester's canonical chain.
   219  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   220  	dl.lock.RLock()
   221  	defer dl.lock.RUnlock()
   222  
   223  	return dl.ownBlocks[hash]
   224  }
   225  
   226  // CurrentHeader retrieves the current head header from the canonical chain.
   227  func (dl *downloadTester) CurrentHeader() *types.Header {
   228  	dl.lock.RLock()
   229  	defer dl.lock.RUnlock()
   230  
   231  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   232  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   233  			return header
   234  		}
   235  	}
   236  	return dl.genesis.Header()
   237  }
   238  
   239  // CurrentBlock retrieves the current head block from the canonical chain.
   240  func (dl *downloadTester) CurrentBlock() *types.Block {
   241  	dl.lock.RLock()
   242  	defer dl.lock.RUnlock()
   243  
   244  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   245  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   246  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   247  				return block
   248  			}
   249  		}
   250  	}
   251  	return dl.genesis
   252  }
   253  
   254  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   255  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   256  	dl.lock.RLock()
   257  	defer dl.lock.RUnlock()
   258  
   259  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   260  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   261  			return block
   262  		}
   263  	}
   264  	return dl.genesis
   265  }
   266  
   267  // FastSyncCommitHead manually sets the head block to a given hash.
   268  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   269  	// For now only check that the state trie is correct
   270  	if block := dl.GetBlockByHash(hash); block != nil {
   271  		_, err := trie.NewSecure(block.Root(), dl.stateDb, 0)
   272  		return err
   273  	}
   274  	return fmt.Errorf("non existent block: %x", hash[:4])
   275  }
   276  
   277  // GetTdByHash retrieves the block's total difficulty from the canonical chain.
   278  func (dl *downloadTester) GetTdByHash(hash common.Hash) *big.Int {
   279  	dl.lock.RLock()
   280  	defer dl.lock.RUnlock()
   281  
   282  	return dl.ownChainTd[hash]
   283  }
   284  
   285  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   286  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   287  	dl.lock.Lock()
   288  	defer dl.lock.Unlock()
   289  
   290  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   291  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   292  		return 0, errors.New("unknown parent")
   293  	}
   294  	for i := 1; i < len(headers); i++ {
   295  		if headers[i].ParentHash != headers[i-1].Hash() {
   296  			return i, errors.New("unknown parent")
   297  		}
   298  	}
   299  	// Do a full insert if pre-checks passed
   300  	for i, header := range headers {
   301  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   302  			continue
   303  		}
   304  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   305  			return i, errors.New("unknown parent")
   306  		}
   307  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   308  		dl.ownHeaders[header.Hash()] = header
   309  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   310  	}
   311  	return len(headers), nil
   312  }
   313  
   314  // InsertChain injects a new batch of blocks into the simulated chain.
   315  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   316  	dl.lock.Lock()
   317  	defer dl.lock.Unlock()
   318  
   319  	for i, block := range blocks {
   320  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   321  			return i, errors.New("unknown parent")
   322  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   323  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   324  		}
   325  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   326  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   327  			dl.ownHeaders[block.Hash()] = block.Header()
   328  		}
   329  		dl.ownBlocks[block.Hash()] = block
   330  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   331  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   332  	}
   333  	return len(blocks), nil
   334  }
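
// Note how InsertChain fakes state persistence: each inserted block stores a
// single marker byte under its state root, and every "is the state present"
// check in this tester (HasBlockAndState, CurrentBlock) merely probes that key:
//
//	dl.stateDb.Put(block.Root().Bytes(), []byte{0x00}) // mark state as present
//	_, err := dl.stateDb.Get(block.Root().Bytes())     // err == nil => present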
   335  
   336  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   337  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   338  	dl.lock.Lock()
   339  	defer dl.lock.Unlock()
   340  
   341  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   342  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   343  			return i, errors.New("unknown owner")
   344  		}
   345  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   346  			return i, errors.New("unknown parent")
   347  		}
   348  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   349  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   350  	}
   351  	return len(blocks), nil
   352  }
   353  
   354  // Rollback removes some recently added elements from the chain.
   355  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   356  	dl.lock.Lock()
   357  	defer dl.lock.Unlock()
   358  
   359  	for i := len(hashes) - 1; i >= 0; i-- {
   360  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   361  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   362  		}
   363  		delete(dl.ownChainTd, hashes[i])
   364  		delete(dl.ownHeaders, hashes[i])
   365  		delete(dl.ownReceipts, hashes[i])
   366  		delete(dl.ownBlocks, hashes[i])
   367  	}
   368  }
   369  
   370  // newPeer registers a new block download source into the downloader.
   371  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   372  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   373  }
   374  
   375  // newSlowPeer registers a new block download source into the downloader, with a
   376  // specific delay time on processing the network packets sent to it, simulating
   377  // potentially slow network IO.
   378  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   379  	dl.lock.Lock()
   380  	defer dl.lock.Unlock()
   381  
   382  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   383  	if err == nil {
   384  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   385  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   386  		copy(dl.peerHashes[id], hashes)
   387  
   388  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   389  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   390  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   391  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   392  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   393  
   394  		genesis := hashes[len(hashes)-1]
   395  		if header := headers[genesis]; header != nil {
   396  			dl.peerHeaders[id][genesis] = header
   397  			dl.peerChainTds[id][genesis] = header.Difficulty
   398  		}
   399  		if block := blocks[genesis]; block != nil {
   400  			dl.peerBlocks[id][genesis] = block
   401  			dl.peerChainTds[id][genesis] = block.Difficulty()
   402  		}
   403  
   404  		for i := len(hashes) - 2; i >= 0; i-- {
   405  			hash := hashes[i]
   406  
   407  			if header, ok := headers[hash]; ok {
   408  				dl.peerHeaders[id][hash] = header
   409  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   410  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   411  				}
   412  			}
   413  			if block, ok := blocks[hash]; ok {
   414  				dl.peerBlocks[id][hash] = block
   415  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   416  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   417  				}
   418  			}
   419  			if receipt, ok := receipts[hash]; ok {
   420  				dl.peerReceipts[id][hash] = receipt
   421  			}
   422  		}
   423  	}
   424  	return err
   425  }
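
// Usage sketch (the peer id and delay are illustrative): register a peer whose
// responses are held back by half a second to exercise timeout and throttling
// paths.
//
//	tester.newSlowPeer("slow", 64, hashes, headers, blocks, receipts, 500*time.Millisecond)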
   426  
   427  // dropPeer simulates a hard peer removal from the connection pool.
   428  func (dl *downloadTester) dropPeer(id string) {
   429  	dl.lock.Lock()
   430  	defer dl.lock.Unlock()
   431  
   432  	delete(dl.peerHashes, id)
   433  	delete(dl.peerHeaders, id)
   434  	delete(dl.peerBlocks, id)
   435  	delete(dl.peerChainTds, id)
   436  
   437  	dl.downloader.UnregisterPeer(id)
   438  }
   439  
   440  type downloadTesterPeer struct {
   441  	dl    *downloadTester
   442  	id    string
   443  	delay time.Duration
   444  	lock  sync.RWMutex
   445  }
   446  
   447  // setDelay is a thread-safe setter for the network delay value.
   448  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   449  	dlp.lock.Lock()
   450  	defer dlp.lock.Unlock()
   451  
   452  	dlp.delay = delay
   453  }
   454  
   455  // waitDelay is a thread-safe way to sleep for the configured time.
   456  func (dlp *downloadTesterPeer) waitDelay() {
   457  	dlp.lock.RLock()
   458  	delay := dlp.delay
   459  	dlp.lock.RUnlock()
   460  
   461  	time.Sleep(delay)
   462  }
   463  
   464  // Head retrieves the peer's current head hash and total difficulty.
   466  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   467  	dlp.dl.lock.RLock()
   468  	defer dlp.dl.lock.RUnlock()
   469  
   470  	return dlp.dl.peerHashes[dlp.id][0], nil
   471  }
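
// Note that the total difficulty returned above is nil: these tests drive
// synchronisation through (*downloadTester).sync, which resolves the TD from
// peerChainTds instead of trusting the peer's head response.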
   472  
   473  // RequestHeadersByHash constructs a GetBlockHeaders request based on a hash
   474  // origin, associated with a particular peer in the download tester. The origin
   475  // is resolved to a canonical number and served via RequestHeadersByNumber.
   476  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   477  	// Find the canonical number of the hash
   478  	dlp.dl.lock.RLock()
   479  	number := uint64(0)
   480  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   481  		if hash == origin {
   482  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   483  			break
   484  		}
   485  	}
   486  	dlp.dl.lock.RUnlock()
   487  
   488  	// Use the absolute header fetcher to satisfy the query
   489  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   490  }
   491  
   492  // RequestHeadersByNumber constructs a GetBlockHeaders request based on a
   493  // numbered origin, associated with a particular peer in the download tester.
   494  // The matching headers are delivered asynchronously to the downloader.
   495  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   496  	dlp.waitDelay()
   497  
   498  	dlp.dl.lock.RLock()
   499  	defer dlp.dl.lock.RUnlock()
   500  
   501  	// Gather the next batch of headers
   502  	hashes := dlp.dl.peerHashes[dlp.id]
   503  	headers := dlp.dl.peerHeaders[dlp.id]
   504  	result := make([]*types.Header, 0, amount)
   505  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   506  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   507  			result = append(result, header)
   508  		}
   509  	}
   510  	// Delay delivery a bit to allow attacks to unfold
   511  	go func() {
   512  		time.Sleep(time.Millisecond)
   513  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   514  	}()
   515  	return nil
   516  }
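
// Worked example for the index arithmetic above, on a 6-entry hash chain
// (blocks 0..5 stored head->genesis, so hashes[0] is block 5): a request with
// origin=2, amount=2, skip=1 reads hashes[6-2-1-0] = block 2 and
// hashes[6-2-1-2] = block 4, i.e. every (skip+1)-th header ascending from the
// origin.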
   517  
   518  // RequestBodies constructs a GetBlockBodies request associated with a
   519  // particular peer in the download tester. The matching block bodies are
   520  // delivered asynchronously to the downloader.
   521  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   522  	dlp.waitDelay()
   523  
   524  	dlp.dl.lock.RLock()
   525  	defer dlp.dl.lock.RUnlock()
   526  
   527  	blocks := dlp.dl.peerBlocks[dlp.id]
   528  
   529  	transactions := make([][]*types.Transaction, 0, len(hashes))
   530  	uncles := make([][]*types.Header, 0, len(hashes))
   531  
   532  	for _, hash := range hashes {
   533  		if block, ok := blocks[hash]; ok {
   534  			transactions = append(transactions, block.Transactions())
   535  			uncles = append(uncles, block.Uncles())
   536  		}
   537  	}
   538  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   539  
   540  	return nil
   541  }
   542  
   543  // RequestReceipts constructs a GetReceipts request associated with a
   544  // particular peer in the download tester. The matching block receipts are
   545  // delivered asynchronously to the downloader.
   546  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   547  	dlp.waitDelay()
   548  
   549  	dlp.dl.lock.RLock()
   550  	defer dlp.dl.lock.RUnlock()
   551  
   552  	receipts := dlp.dl.peerReceipts[dlp.id]
   553  
   554  	results := make([][]*types.Receipt, 0, len(hashes))
   555  	for _, hash := range hashes {
   556  		if receipt, ok := receipts[hash]; ok {
   557  			results = append(results, receipt)
   558  		}
   559  	}
   560  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   561  
   562  	return nil
   563  }
   564  
   565  // RequestNodeData constructs a GetNodeData request associated with a
   566  // particular peer in the download tester. The matching state data is
   567  // delivered asynchronously to the downloader.
   568  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   569  	dlp.waitDelay()
   570  
   571  	dlp.dl.lock.RLock()
   572  	defer dlp.dl.lock.RUnlock()
   573  
   574  	results := make([][]byte, 0, len(hashes))
   575  	for _, hash := range hashes {
   576  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   577  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   578  				results = append(results, data)
   579  			}
   580  		}
   581  	}
   582  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   583  
   584  	return nil
   585  }
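
// A peer can be made to withhold specific trie nodes by flagging them in
// peerMissingStates before syncing (the id and hash here are illustrative);
// the filter above then silently drops them from the delivery:
//
//	tester.peerMissingStates["peer"][stateRoot] = true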
   586  
   587  // assertOwnChain checks if the local chain contains the correct number of items
   588  // of the various chain components.
   589  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   590  	assertOwnForkedChain(t, tester, 1, []int{length})
   591  }
   592  
   593  // assertOwnForkedChain checks if the local forked chain contains the correct
   594  // number of items of the various chain components.
   595  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   596  	// Initialize the counters for the first fork
   597  	headers, blocks := lengths[0], lengths[0]
   598  
   599  	minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks
   600  	if minReceipts < 0 {
   601  		minReceipts = 1
   602  	}
   603  	if maxReceipts < 0 {
   604  		maxReceipts = 1
   605  	}
   606  	// Update the counters for each subsequent fork
   607  	for _, length := range lengths[1:] {
   608  		headers += length - common
   609  		blocks += length - common
   610  
   611  		minReceipts += length - common - fsMinFullBlocks - fsPivotInterval
   612  		maxReceipts += length - common - fsMinFullBlocks
   613  	}
   614  
   615  	switch tester.downloader.mode {
   616  	case FullSync:
   617  		minReceipts, maxReceipts = 1, 1
   618  	case LightSync:
   619  		blocks, minReceipts, maxReceipts = 1, 1, 1
   620  	}
   621  	if hs := len(tester.ownHeaders); hs != headers {
   622  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   623  	}
   624  	if bs := len(tester.ownBlocks); bs != blocks {
   625  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   626  	}
   627  	if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts {
   628  		t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts)
   629  	}
   630  	// Verify the state trie too for fast syncs
   631  	if tester.downloader.mode == FastSync {
   632  		var index int
   633  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   634  			index = pivot
   635  		} else {
   636  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   637  		}
   638  		if index > 0 {
   639  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(tester.stateDb)); statedb == nil || err != nil {
   640  				t.Fatalf("state reconstruction failed: %v", err)
   641  			}
   642  		}
   643  	}
   644  }
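
// Usage sketch for a two-branch fork: both lengths and the common-prefix count
// include the genesis block, so for branches hashesA and hashesB sharing
// `common` blocks past genesis the assertion reads:
//
//	assertOwnForkedChain(t, tester, common+1, []int{len(hashesA), len(hashesB)})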
   645  
   646  // Tests that simple synchronization against a canonical chain works correctly.
   647  // In this test the common ancestor lookup should be short-circuited and not
   648  // require binary searching.
   649  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   650  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   651  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   652  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   653  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   654  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   655  
   656  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   657  	t.Parallel()
   658  
   659  	tester := newTester()
   660  	defer tester.terminate()
   661  
   662  	// Create a small enough block chain to download
   663  	// targetBlocks := blockCacheLimit - 15
   664  	targetBlocks := 20
   665  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
   666  
   667  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   668  
   669  	// Synchronise with the peer and make sure all relevant data was retrieved
   670  	if err := tester.sync("peer", nil, mode); err != nil {
   671  		t.Fatalf("failed to synchronise blocks: %v", err)
   672  	}
   673  
   674  	assertOwnChain(t, tester, targetBlocks+1)
   675  }
   676  
   677  // Tests that if a large batch of blocks is being downloaded, it gets throttled
   678  // until the cached blocks are retrieved.
   679  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   680  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   681  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   682  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   683  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   684  
   685  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   686  	tester := newTester()
   687  	defer tester.terminate()
   688  
   689  	// Create a long block chain for the tester to download
   690  	// targetBlocks := 8 * blockCacheLimit
   691  	targetBlocks := 20
   692  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
   693  
   694  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   695  
   696  	// Wrap the importer to allow stepping
   697  	blocked, proceed := uint32(0), make(chan struct{})
   698  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   699  		atomic.StoreUint32(&blocked, uint32(len(results)))
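		// Block until the test signals proceed: freezing the importer here lets
		// the download cache fill up so the throttling behaviour below can be
		// observed deterministically.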
   700  		<-proceed
   701  	}
   702  	// Start a synchronisation concurrently
   703  	errc := make(chan error)
   704  	go func() {
   705  		errc <- tester.sync("peer", nil, mode)
   706  	}()
   707  	// Iteratively take some blocks, always checking the retrieval count
   708  	for {
   709  		// Check the retrieval count synchronously (hence this ugly locking block)
   710  		tester.lock.RLock()
   711  		retrieved := len(tester.ownBlocks)
   712  		tester.lock.RUnlock()
   713  		if retrieved >= targetBlocks+1 {
   714  			break
   715  		}
   716  		// Wait a bit for sync to throttle itself
   717  		var cached, frozen int
   718  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   719  			time.Sleep(25 * time.Millisecond)
   720  
   721  			tester.lock.Lock()
   722  			tester.downloader.queue.lock.Lock()
   723  			cached = len(tester.downloader.queue.blockDonePool)
   724  			if mode == FastSync {
   725  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   726  					if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   727  						cached = receipts
   728  					}
   729  				}
   730  			}
   731  			frozen = int(atomic.LoadUint32(&blocked))
   732  			retrieved = len(tester.ownBlocks)
   733  			tester.downloader.queue.lock.Unlock()
   734  			tester.lock.Unlock()
   735  
   736  			if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 {
   737  				break
   738  			}
   739  		}
   740  		// Make sure we filled up the cache, then exhaust it
   741  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   742  
   743  		tester.lock.RLock()
   744  		retrieved = len(tester.ownBlocks)
   745  		tester.lock.RUnlock()
   746  		if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 {
   747  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1)
   748  		}
   749  		// Permit the blocked blocks to import
   750  		if atomic.LoadUint32(&blocked) > 0 {
   751  			atomic.StoreUint32(&blocked, uint32(0))
   752  			proceed <- struct{}{}
   753  		}
   754  	}
   755  	// Check that we haven't pulled more blocks than available
   756  	assertOwnChain(t, tester, targetBlocks+1)
   757  	if err := <-errc; err != nil {
   758  		t.Fatalf("block synchronization failed: %v", err)
   759  	}
   760  }
   761  
   762  // Tests that an inactive downloader will not accept incoming block headers and
   763  // bodies.
   764  func TestInactiveDownloader62(t *testing.T) {
   765  	t.Parallel()
   766  
   767  	tester := newTester()
   768  	defer tester.terminate()
   769  
   770  	// Check that neither block headers nor bodies are accepted
   771  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   772  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   773  	}
   774  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   775  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   776  	}
   777  }
   778  
   779  // Tests that an inactive downloader will not accept incoming block headers,
   780  // bodies and receipts.
   781  func TestInactiveDownloader63(t *testing.T) {
   782  	t.Parallel()
   783  
   784  	tester := newTester()
   785  	defer tester.terminate()
   786  
   787  	// Check that neither block headers, bodies nor receipts are accepted
   788  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   789  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   790  	}
   791  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   792  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   793  	}
   794  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   795  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   796  	}
   797  }
   798  
   799  // Tests that a canceled download wipes all previously accumulated state.
   800  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   801  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   802  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   803  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   804  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   805  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   806  
   807  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   808  	t.Parallel()
   809  
   810  	tester := newTester()
   811  	defer tester.terminate()
   812  
   813  	// Create a small enough block chain for the tester to download
   814  	targetBlocks := 15
   815  	if targetBlocks >= MaxHashFetch {
   816  		targetBlocks = MaxHashFetch - 15
   817  	}
   818  
   819  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
   820  
   821  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   822  
   823  	// Make sure canceling works with a pristine downloader
   824  	tester.downloader.Cancel()
   825  	if !tester.downloader.queue.Idle() {
   826  		t.Errorf("download queue not idle")
   827  	}
   828  	// Synchronise with the peer, but cancel afterwards
   829  	if err := tester.sync("peer", nil, mode); err != nil {
   830  		t.Fatalf("failed to synchronise blocks: %v", err)
   831  	}
   832  
   834  	time.Sleep(45 * time.Second)
   836  
   837  	tester.downloader.Cancel()
   838  	if !tester.downloader.queue.Idle() {
   839  		t.Errorf("download queue not idle")
   840  	}
   841  }
   842  
   843  // Tests that synchronisation from multiple peers works as intended (multi-thread sanity test).
   844  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   845  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   846  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   847  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   848  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   849  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   850  
   851  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   852  	t.Parallel()
   853  
   854  	tester := newTester()
   855  	defer tester.terminate()
   856  
   857  	// Create various peers with various parts of the chain
   858  	targetPeers := 4
   859  	targetBlocks := 20
   860  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
   861  
   862  	for i := 0; i < targetPeers; i++ {
   863  		id := fmt.Sprintf("peer #%d", i)
   864  		tester.newPeer(id, protocol, hashes[i*2:], headers, blocks, receipts)
   865  	}
   866  	if err := tester.sync("peer #0", nil, mode); err != nil {
   867  		t.Fatalf("failed to synchronise blocks: %v", err)
   868  	}
   869  	assertOwnChain(t, tester, targetBlocks+1)
   870  }
   871  
   872  // Tests that synchronisations behave well in multi-version protocol environments
   873  // and do not wreak havoc on other nodes in the network.
   874  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
   875  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
   876  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
   877  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
   878  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
   879  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
   880  
   881  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
   882  	t.Parallel()
   883  
   884  	tester := newTester()
   885  	defer tester.terminate()
   886  
   887  	// Create a small enough block chain to download
   888  	targetBlocks := 20
   889  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
   890  
   891  	// Create peers of every type
   892  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
   893  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
   894  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
   895  
   896  	// Synchronise with the requested peer and make sure all blocks were retrieved
   897  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
   898  		t.Fatalf("failed to synchronise blocks: %v", err)
   899  	}
   900  	assertOwnChain(t, tester, targetBlocks+1)
   901  
   902  	// Check that no peers have been dropped off
   903  	for _, version := range []int{62, 63, 64} {
   904  		peer := fmt.Sprintf("peer %d", version)
   905  		if _, ok := tester.peerHashes[peer]; !ok {
   906  			t.Errorf("%s dropped", peer)
   907  		}
   908  	}
   909  }
   910  
   911  // Tests that if a block is empty (e.g. header only), no body request should be
   912  // made, and instead the header should be assembled into a whole block by itself.
   913  func TestEmptyShortCircuit62(t *testing.T)     { testEmptyShortCircuit(t, 62, FullSync) }
   914  func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) }
   915  
   916  // func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) }
   917  func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
   918  
   919  // func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
   920  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
   921  
   922  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
   923  	t.Parallel()
   924  
   925  	tester := newTester()
   926  	defer tester.terminate()
   927  
   928  	// Create a block chain to download
   929  	targetBlocks := 15
   930  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
   931  
   932  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   933  
   934  	// Instrument the downloader to signal body requests
   935  	bodiesHave, receiptsHave := int32(0), int32(0)
   936  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   937  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
   938  	}
   939  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   940  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
   941  	}
   942  	// Synchronise with the peer and make sure all blocks were retrieved
   943  	if err := tester.sync("peer", nil, mode); err != nil {
   944  		t.Fatalf("failed to synchronise blocks: %v", err)
   945  	}
   946  
   947  	time.Sleep(10 * time.Second)
   948  
   949  	assertOwnChain(t, tester, targetBlocks+1)
   950  
   951  	// Validate the number of block bodies that should have been requested
   952  	bodiesNeeded, receiptsNeeded := 0, 0
   953  	for _, block := range blocks {
   954  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   955  			bodiesNeeded++
   956  		}
   957  	}
   958  	for hash, receipt := range receipts {
   959  		if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot {
   960  			receiptsNeeded++
   961  		}
   962  	}
   963  	if int(bodiesHave) != bodiesNeeded {
   964  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
   965  	}
   966  	if int(receiptsHave) != receiptsNeeded {
   967  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
   968  	}
   969  }
   970  
   971  // Tests that headers are enqueued continuously, preventing malicious nodes from
   972  // stalling the downloader by feeding gapped header chains.
   973  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
   974  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
   975  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
   976  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
   977  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
   978  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
   979  
   980  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
   981  	t.Parallel()
   982  
   983  	tester := newTester()
   984  	defer tester.terminate()
   985  
   986  	// Create a small enough block chain to download
   987  	targetBlocks := 20
   988  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
   989  
   990  	// Attempt a full sync with an attacker feeding gapped headers
   991  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
   992  	missing := targetBlocks / 2
   993  	delete(tester.peerHeaders["attack"], hashes[missing])
   994  
   995  	if err := tester.sync("attack", nil, mode); err == nil {
   996  		t.Fatalf("succeeded attacker synchronisation")
   997  	}
   998  	// Synchronise with the valid peer and make sure sync succeeds
   999  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1000  	if err := tester.sync("valid", nil, mode); err != nil {
  1001  		t.Fatalf("failed to synchronise blocks: %v", err)
  1002  	}
  1003  	assertOwnChain(t, tester, targetBlocks+1)
  1004  }
  1005  
  1006  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1007  // detects the invalid numbering.
  1008  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1009  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1010  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1011  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1012  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1013  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1014  
  1015  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1016  	tester := newTester()
  1017  	defer tester.terminate()
  1018  
  1019  	// Create a small enough block chain to download
  1020  	targetBlocks := 20
  1021  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
  1022  
  1023  	// Attempt a full sync with an attacker feeding shifted headers
  1024  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1025  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1026  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1027  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1028  
  1029  	if err := tester.sync("attack", nil, mode); err == nil {
  1030  		t.Fatalf("succeeded attacker synchronisation")
  1031  	}
  1032  	// Synchronise with the valid peer and make sure sync succeeds
  1033  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1034  	if err := tester.sync("valid", nil, mode); err != nil {
  1035  		t.Fatalf("failed to synchronise blocks: %v", err)
  1036  	}
  1037  	assertOwnChain(t, tester, targetBlocks+1)
  1038  }
  1039  
  1040  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1041  // for various failure scenarios. Afterwards a full sync is attempted to make
  1042  // sure no state was corrupted.
  1043  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1044  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1045  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1046  
  1047  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1048  	tester := newTester()
  1049  	defer tester.terminate()
  1050  
  1051  	// Create a small enough block chain to download
  1052  	targetBlocks := 3*fsHeaderSafetyNet + fsPivotInterval + fsMinFullBlocks
  1053  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
  1054  
  1055  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1056  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1057  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1058  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1059  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1060  
  1061  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1062  		t.Fatalf("succeeded fast attacker synchronisation")
  1063  	}
  1064  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1065  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1066  	}
  1067  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1068  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1069  	// rolled back, and also the pivot point being reverted to a non-block status.
  1070  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1071  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1072  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1073  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1074  
  1075  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1076  		t.Fatalf("succeeded block attacker synchronisation")
  1077  	}
  1078  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1079  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1080  	}
  1081  	if mode == FastSync {
  1082  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1083  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1084  		}
  1085  	}
  1086  	// Attempt to sync with an attacker that withholds promised blocks after the
  1087  	// fast sync pivot point. This could be a trial to leave the node with a bad
  1088  	// but already imported pivot block.
  1089  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1090  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1091  
  1092  	tester.downloader.fsPivotFails = 0
  1093  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1094  		for i := missing; i <= len(hashes); i++ {
  1095  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1096  		}
  1097  		tester.downloader.syncInitHook = nil
  1098  	}
  1099  
  1100  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1101  		t.Fatalf("succeeded withholding attacker synchronisation")
  1102  	}
  1103  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1104  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1105  	}
  1106  	if mode == FastSync {
  1107  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1108  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1109  		}
  1110  	}
  1111  	tester.downloader.fsPivotFails = fsCriticalTrials
  1112  
  1113  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1114  	// rollback should also disable fast syncing for this process, verify that we
  1115  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1116  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1117  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1118  	if err := tester.sync("valid", nil, mode); err != nil {
  1119  		t.Fatalf("failed to synchronise blocks: %v", err)
  1120  	}
  1121  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1122  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1123  	}
  1124  	if mode != LightSync {
  1125  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1126  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1127  		}
  1128  	}
  1129  }
  1130  
  1131  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1132  // afterwards by not sending any useful hashes.
  1133  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1134  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1135  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1136  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1137  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1138  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1139  
  1140  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1141  	t.Parallel()
  1142  
  1143  	tester := newTester()
  1144  	defer tester.terminate()
  1145  
  1146  	hashes, headers, blocks, receipts := tester.makeChain(0, testAddress, tester.genesis, nil, false)
  1147  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1148  
  1149  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1150  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1151  	}
  1152  }
  1153  
  1154  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1155  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1156  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1157  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1158  
  1159  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1160  	// Define the disconnection requirement for individual hash fetch errors
  1161  	tests := []struct {
  1162  		result error
  1163  		drop   bool
  1164  	}{
  1165  		{nil, false},                        // Sync succeeded, all is well
  1166  		{errBusy, false},                    // Sync is already in progress, no problem
  1167  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1168  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1169  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1170  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1171  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1172  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1173  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1174  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1175  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1176  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1177  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1178  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1179  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1180  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1181  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1182  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1183  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1184  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1185  	}
  1186  	// Run the tests and check disconnection status
  1187  	tester := newTester()
  1188  	defer tester.terminate()
  1189  
  1190  	for i, tt := range tests {
  1191  		// Register a new peer and ensure its presence
  1192  		id := fmt.Sprintf("test %d", i)
  1193  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1194  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1195  		}
  1196  		if _, ok := tester.peerHashes[id]; !ok {
  1197  			t.Fatalf("test %d: registered peer not found", i)
  1198  		}
  1199  		// Simulate a synchronisation and check the required result
  1200  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1201  
  1202  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
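        		// The peer must remain registered exactly when the error is not a drop
        		// offence; !ok reports whether it has been removed from the peer set.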
  1203  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1204  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1205  		}
  1206  	}
  1207  }
  1208  
  1209  // Tests that synchronisation progress (origin block number, current block number
  1210  // and highest block number) is tracked and updated correctly.
  1211  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1212  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1213  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1214  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1215  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1216  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1217  
  1218  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1219  	t.Parallel()
  1220  
  1221  	tester := newTester()
  1222  	defer tester.terminate()
  1223  
  1224  	// Create a small enough block chain to download
  1225  	targetBlocks := blockCacheLimit - 15
  1226  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
  1227  
  1228  	// Set a sync init hook to catch progress changes
  1229  	starting := make(chan struct{})
  1230  	progress := make(chan struct{})
  1231  
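        	// The hook parks the downloader right after it has fixed its sync
        	// boundaries, letting the test inspect the reported progress before the
        	// progress channel releases it.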
  1232  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1233  		starting <- struct{}{}
  1234  		<-progress
  1235  	}
  1236  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1237  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1238  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1239  	}
  1240  	// Synchronise half the blocks and check initial progress
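        	// hashes runs from head to genesis, so serving only hashes[targetBlocks/2:]
        	// caps the peer's head at block targetBlocks/2+1.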
  1241  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1242  	pending := new(sync.WaitGroup)
  1243  	pending.Add(1)
  1244  
  1245  	go func() {
  1246  		defer pending.Done()
  1247  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1248  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1249  		}
  1250  	}()
  1251  	<-starting
  1252  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1253  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1254  	}
  1255  	progress <- struct{}{}
  1256  	pending.Wait()
  1257  
  1258  	// Synchronise all the blocks and check continuation progress
  1259  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1260  	pending.Add(1)
  1261  
  1262  	go func() {
  1263  		defer pending.Done()
  1264  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1265  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1266  		}
  1267  	}()
  1268  	<-starting
  1269  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1270  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1271  	}
  1272  	progress <- struct{}{}
  1273  	pending.Wait()
  1274  
  1275  	// Check final progress after successful sync
  1276  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1277  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1278  	}
  1279  }
  1280  
  1281  // Tests that if synchronisation is aborted due to some failure, then the progress
  1282  // origin is not updated in the next sync cycle, as it should be considered the
  1283  // continuation of the previous sync and not a new instance.
  1284  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1285  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1286  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1287  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1288  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1289  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1290  
  1291  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1292  	t.Parallel()
  1293  
  1294  	tester := newTester()
  1295  	defer tester.terminate()
  1296  
  1297  	// Create a small enough block chain to download
  1298  	targetBlocks := blockCacheLimit - 15
  1299  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
  1300  
  1301  	// Set a sync init hook to catch progress changes
  1302  	starting := make(chan struct{})
  1303  	progress := make(chan struct{})
  1304  
  1305  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1306  		starting <- struct{}{}
  1307  		<-progress
  1308  	}
  1309  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1310  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1311  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1312  	}
  1313  	// Attempt a full sync with a faulty peer
  1314  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
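        	// Withhold the header, body and receipts of the midpoint block, so the
        	// sync is guaranteed to fail partway through.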
  1315  	missing := targetBlocks / 2
  1316  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1317  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1318  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1319  
  1320  	pending := new(sync.WaitGroup)
  1321  	pending.Add(1)
  1322  
  1323  	go func() {
  1324  		defer pending.Done()
  1325  		if err := tester.sync("faulty", nil, mode); err == nil {
  1326  			panic("succeeded faulty synchronisation")
  1327  		}
  1328  	}()
  1329  	<-starting
  1330  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1331  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1332  	}
  1333  	progress <- struct{}{}
  1334  	pending.Wait()
  1335  
  1336  	// Synchronise with a good peer and check that the progress origin remains the same after the failure
  1337  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1338  	pending.Add(1)
  1339  
  1340  	go func() {
  1341  		defer pending.Done()
  1342  		if err := tester.sync("valid", nil, mode); err != nil {
  1343  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1344  		}
  1345  	}()
  1346  	<-starting
  1347  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1348  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1349  	}
  1350  	progress <- struct{}{}
  1351  	pending.Wait()
  1352  
  1353  	// Check final progress after successful sync
  1354  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1355  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1356  	}
  1357  }
  1358  
  1359  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1360  // the progress height is successfully reduced at the next sync invocation.
  1361  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1362  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1363  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1364  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1365  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1366  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1367  
  1368  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1369  	t.Parallel()
  1370  
  1371  	tester := newTester()
  1372  	defer tester.terminate()
  1373  
  1374  	// Create a small block chain
  1375  	targetBlocks := blockCacheLimit - 15
  1376  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, testAddress, tester.genesis, nil, false)
  1377  
  1378  	// Set a sync init hook to catch progress changes
  1379  	starting := make(chan struct{})
  1380  	progress := make(chan struct{})
  1381  
  1382  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1383  		starting <- struct{}{}
  1384  		<-progress
  1385  	}
  1386  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1387  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1388  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1389  	}
  1390  	// Create and sync with an attacker that promises a higher chain than available
  1391  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
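        	// Withhold the two blocks just below the advertised head: the attacker
        	// still claims a height of targetBlocks+3, but can never deliver a
        	// contiguous chain up to it.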
  1392  	for i := 1; i < 3; i++ {
  1393  		delete(tester.peerHeaders["attack"], hashes[i])
  1394  		delete(tester.peerBlocks["attack"], hashes[i])
  1395  		delete(tester.peerReceipts["attack"], hashes[i])
  1396  	}
  1397  
  1398  	pending := new(sync.WaitGroup)
  1399  	pending.Add(1)
  1400  
  1401  	go func() {
  1402  		defer pending.Done()
  1403  		if err := tester.sync("attack", nil, mode); err == nil {
  1404  			panic("succeeded attacker synchronisation")
  1405  		}
  1406  	}()
  1407  	<-starting
  1408  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1409  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1410  	}
  1411  	progress <- struct{}{}
  1412  	pending.Wait()
  1413  
  1414  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
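        	// hashes[3:] drops the three attacker-only blocks, so the valid peer's
        	// head sits at the true height of targetBlocks.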
  1415  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1416  	pending.Add(1)
  1417  
  1418  	go func() {
  1419  		defer pending.Done()
  1420  		if err := tester.sync("valid", nil, mode); err != nil {
  1421  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1422  		}
  1423  	}()
  1424  	<-starting
  1425  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1426  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1427  	}
  1428  	progress <- struct{}{}
  1429  	pending.Wait()
  1430  
  1431  	// Check final progress after successful sync
  1432  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1433  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1434  	}
  1435  }
  1436  
  1437  // This test reproduces an issue where unexpected deliveries would
  1438  // block indefinitely if they arrived at the right time.
  1439  func TestDeliverHeadersHang62(t *testing.T)      { testDeliverHeadersHang(t, 62, FullSync) }
  1440  func TestDeliverHeadersHang63Full(t *testing.T)  { testDeliverHeadersHang(t, 63, FullSync) }
  1441  func TestDeliverHeadersHang63Fast(t *testing.T)  { testDeliverHeadersHang(t, 63, FastSync) }
  1442  func TestDeliverHeadersHang64Full(t *testing.T)  { testDeliverHeadersHang(t, 64, FullSync) }
  1443  func TestDeliverHeadersHang64Fast(t *testing.T)  { testDeliverHeadersHang(t, 64, FastSync) }
  1444  func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
  1445  
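        // floodingTestPeer wraps a downloader peer, forwarding all calls verbatim
        // except RequestHeadersByNumber, which first floods the downloader with
        // unrequested header deliveries from bogus peers.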
  1446  type floodingTestPeer struct {
  1447  	peer   Peer
  1448  	tester *downloadTester
  1449  }
  1450  
  1451  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1452  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1453  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1454  }
  1455  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1456  	return ftp.peer.RequestBodies(hashes)
  1457  }
  1458  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1459  	return ftp.peer.RequestReceipts(hashes)
  1460  }
  1461  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1462  	return ftp.peer.RequestNodeData(hashes)
  1463  }
  1464  
  1465  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
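        	// Flood the downloader with header deliveries from 500 peers it has never
        	// registered; every delivery must return promptly rather than block.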
  1466  	deliveriesDone := make(chan struct{}, 500)
  1467  	for i := 0; i < cap(deliveriesDone); i++ {
  1468  		peer := fmt.Sprintf("fake-peer%d", i)
  1469  		go func() {
  1470  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1471  			deliveriesDone <- struct{}{}
  1472  		}()
  1473  	}
  1474  	// Deliver the actual requested headers.
  1475  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1476  	// None of the extra deliveries should block.
  1477  	timeout := time.After(15 * time.Second)
  1478  	for i := 0; i < cap(deliveriesDone); i++ {
  1479  		select {
  1480  		case <-deliveriesDone:
  1481  		case <-timeout:
  1482  			panic("blocked")
  1483  		}
  1484  	}
  1485  	return nil
  1486  }
  1487  
  1488  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1489  	t.Parallel()
  1490  
  1491  	master := newTester()
  1492  	defer master.terminate()
  1493  
  1494  	hashes, headers, blocks, receipts := master.makeChain(5, testAddress, master.genesis, nil, false)
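        	// The original hang was timing dependent, so run the sync many times over
        	// to give the race a realistic chance to surface.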
  1495  	for i := 0; i < 150; i++ {
  1496  		tester := newTester()
  1497  		tester.peerDb = master.peerDb
  1498  
  1499  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1500  		// Whenever the downloader requests headers, flood it with
  1501  		// a lot of unrequested header deliveries.
  1502  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1503  			tester.downloader.peers.peers["peer"].peer,
  1504  			tester,
  1505  		}
  1506  
  1507  		if err := tester.sync("peer", nil, mode); err != nil {
  1508  			t.Errorf("sync failed: %v", err)
  1509  		}
  1510  		tester.terminate()
  1511  	}
  1512  }
  1513  
  1514  // Tests that if fast sync aborts in the critical section, it can restart a few
  1515  // times before giving up.
  1516  // func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
  1517  // func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
  1518  // func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
  1519  // func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
  1520  
  1521  func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
  1522  	tester := newTester()
  1523  	defer tester.terminate()
  1524  
  1525  	fsMin := 4
  1526  	fsInterval := 16
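        	// fsMin and fsInterval presumably stand in for fsMinFullBlocks and
        	// fsPivotInterval, keeping the generated chain short enough for a quick test.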
  1527  
  1528  	// Create a large enough blockchain to actually fast sync on
  1530  	targetBlocks := fsMin + 2*fsInterval
  1532  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, testAddress, tester.genesis, nil, false)
  1533  
  1534  	// Create a tester peer with a critical section header missing (force failures)
  1535  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1536  	delete(tester.peerHeaders["peer"], hashes[fsMin-1])
  1537  	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
  1538  
  1539  	// Remove all possible pivot state roots and slow down replies (failure resets are exercised later)
  1540  	for i := 0; i < fsInterval; i++ {
  1541  		tester.peerMissingStates["peer"][headers[hashes[fsMin+i]].Root] = true
  1542  	}
  1543  	(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(500 * time.Millisecond) // Enough to reach the critical section
  1544  
  1545  	// Synchronise with the peer a few times and make sure each attempt fails until the retry limit
  1546  	for i := 0; i < int(fsCriticalTrials)-1; i++ {
  1547  		// Attempt a sync and ensure it fails properly
  1548  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1549  			t.Fatalf("failing fast sync succeeded")
  1550  		}
  1551  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1552  
  1553  		// If it's the first failure, the pivot should now be locked => re-enable all other state roots to detect pivot changes
  1554  		if i == 0 {
  1555  			if tester.downloader.fsPivotLock == nil {
  1556  				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
  1557  				t.Fatalf("pivot block not locked after critical section failure")
  1558  			}
  1559  			tester.lock.Lock()
  1560  			tester.peerHeaders["peer"][hashes[fsMin-1]] = headers[hashes[fsMin-1]]
  1561  			tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
  1562  			(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).setDelay(0)
  1563  			tester.lock.Unlock()
  1564  		}
  1565  	}
  1566  	// Make all state nodes available again if we're testing fast sync progression
  1567  	if progress {
  1568  		tester.lock.Lock()
  1569  		tester.peerMissingStates["peer"] = map[common.Hash]bool{}
  1570  		tester.lock.Unlock()
  1571  
  1572  		if err := tester.sync("peer", nil, FastSync); err != nil {
  1573  			t.Fatalf("failed to synchronise blocks in progressed fast sync: %v", err)
  1574  		}
  1575  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1576  
  1577  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != 1 {
  1578  			t.Fatalf("progressed pivot trial count mismatch: have %v, want %v", fails, 1)
  1579  		}
  1580  		assertOwnChain(t, tester, targetBlocks+1)
  1581  	} else {
  1582  		if err := tester.sync("peer", nil, FastSync); err == nil {
  1583  			t.Fatalf("succeeded in synchronising blocks in a failing fast sync")
  1584  		}
  1585  		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
  1586  
  1587  		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != fsCriticalTrials {
  1588  			t.Fatalf("failed pivot trial count mismatch: have %v, want %v", fails, fsCriticalTrials)
  1589  		}
  1590  	}
  1591  	// With the retry limit exhausted, the downloader switches to full sync, which should succeed
  1592  	if err := tester.sync("peer", nil, FastSync); err != nil {
  1593  		t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
  1594  	}
  1595  	// Note, we can't assert the chain here because the test asserter assumes sync
  1596  	// completed using a single mode of operation, whereas fast-then-slow can result
  1597  	// in arbitrary intermediate state that's not cleanly verifiable.
  1598  }