github.com/cheng762/platon-go@v1.8.17-0.20190529111256-7deff2d7be26/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/PlatONnetwork/PlatON-Go/common"
	"github.com/PlatONnetwork/PlatON-Go/consensus/cbft"
	"github.com/PlatONnetwork/PlatON-Go/core"
	"github.com/PlatONnetwork/PlatON-Go/core/ppos_storage"
	"github.com/PlatONnetwork/PlatON-Go/core/types"
	"github.com/PlatONnetwork/PlatON-Go/crypto"
	"github.com/PlatONnetwork/PlatON-Go/ethdb"
	"github.com/PlatONnetwork/PlatON-Go/event"
	"github.com/PlatONnetwork/PlatON-Go/params"
	"github.com/PlatONnetwork/PlatON-Go/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out the local blockchain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes     map[string][]common.Hash                              // Hash chain belonging to different test peers
	peerHeaders    map[string]map[common.Hash]*types.Header              // Headers belonging to different test peers
	peerBlocks     map[string]map[common.Hash]*types.Block               // Blocks belonging to different test peers
	peerReceipts   map[string]map[common.Hash]types.Receipts             // Receipts belonging to different test peers
	peerChainTds   map[string]map[common.Hash]*big.Int                   // Total difficulties of the blocks in the peer chains
	signatureLists map[string]map[common.Hash][]*common.BlockConfirmSign // Block confirmation signatures belonging to different test peers

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb := ethdb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
		signatureLists:    make(map[string]map[common.Hash][]*common.BlockConfirmSign),
	}
	tester.stateDb = ethdb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)

	return tester
}
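
// A minimal usage sketch (illustrative only, not part of the original suite):
// a typical test below wires a tester to a single peer and synchronises
// against it. The chain length and peer name here are arbitrary assumptions.
//
//	tester := newTester()
//	defer tester.terminate()
//
//	hashes, headers, blocks, receipts, signs := tester.makeChain(32, 0, tester.genesis, nil, false)
//	tester.newPeer("demo", 63, hashes, headers, blocks, receipts, signs)
//	if err := tester.sync("demo", nil, FullSync); err != nil {
//		// the test would fail here
//	}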

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash][]*common.BlockConfirmSign) {
	// Generate the block chain
	db := ethdb.NewMemDatabase()
	ppos_storage.NewPPosTemp(db)
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, cbft.New(params.GrapeChainConfig.Cbft, nil, nil, nil), dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash: block.PrevBlock(i - 1).Hash(),
				Number:     big.NewInt(block.Number().Int64() - 1),
			})
		}
	})
	// Convert the block chain into a hash chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	signm := make(map[common.Hash][]*common.BlockConfirmSign, n+1)
	signm[parent.Hash()] = append(make([]*common.BlockConfirmSign, 0), new(common.BlockConfirmSign))

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
		signm[b.Hash()] = append(make([]*common.BlockConfirmSign, 0), new(common.BlockConfirmSign))
	}
	return hashes, headerm, blockm, receiptm, signm
}
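
// To make the head->parent ordering concrete (an explanatory sketch with
// made-up values, not original commentary): for n = 3 and a genesis parent,
// the returned slice is laid out as
//
//	hashes[0] // block 3 (head)
//	hashes[1] // block 2
//	hashes[2] // block 1
//	hashes[3] // genesis (parent, always last)
//
// which is why callers below treat hashes[0] as a peer's head and
// hashes[len(hashes)-1] as the genesis.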

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block,
	map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts, map[common.Hash][]*common.BlockConfirmSign, map[common.Hash][]*common.BlockConfirmSign) {
	// Create the common suffix
	hashes, headers, blocks, receipts, signm := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if non-balanced forks were requested
	hashes1, headers1, blocks1, receipts1, signm1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := false
	if !balanced {
		heavy = true
	}
	hashes2, headers2, blocks2, receipts2, signm2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	for hash, signs := range signm {
		signm1[hash] = signs
		signm2[hash] = signs
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2, signm1, signm2
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}
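
// The select above is a non-blocking probe (a clarifying note, not original
// commentary): once the downloader cancels, cancelCh is closed, so the
// receive succeeds immediately; while a sync is still active the receive
// would block, the default case fires, and the tester panics. The same idiom
// works for any "is this channel closed yet?" check:
//
//	select {
//	case <-done:
//		// channel closed, operation finished
//	default:
//		// still running
//	}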

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, signs map[common.Hash][]*common.BlockConfirmSign) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, signs, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, signs map[common.Hash][]*common.BlockConfirmSign, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	err := dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)
		dl.signatureLists[id] = make(map[common.Hash][]*common.BlockConfirmSign)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
			if sign, ok := signs[hash]; ok {
				dl.signatureLists[id][hash] = sign
			}
		}
	}
	return err
}
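
// A hedged usage sketch (not part of the original file): simulating a slow
// network peer by registering it with a per-request delay. The 500ms value
// and peer name are arbitrary assumptions for illustration.
//
//	tester.newSlowPeer("laggard", 63, hashes, headers, blocks, receipts, signs, 500*time.Millisecond)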

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	delay time.Duration
	lock  sync.RWMutex
}

// setDelay is a thread safe setter for the network delay value.
func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
	dlp.lock.Lock()
	defer dlp.lock.Unlock()

	dlp.delay = delay
}

// waitDelay is a thread safe way to sleep for the configured time.
func (dlp *downloadTesterPeer) waitDelay() {
	dlp.lock.RLock()
	delay := dlp.delay
	dlp.lock.RUnlock()

	time.Sleep(delay)
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	return dlp.dl.peerHashes[dlp.id][0], nil
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	// Find the canonical number of the hash
	dlp.dl.lock.RLock()
	number := uint64(0)
	for num, hash := range dlp.dl.peerHashes[dlp.id] {
		if hash == origin {
			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
			break
		}
	}
	dlp.dl.lock.RUnlock()

	// Use the absolute header fetcher to satisfy the query
	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	// Gather the next batch of headers
	hashes := dlp.dl.peerHashes[dlp.id]
	headers := dlp.dl.peerHeaders[dlp.id]
	result := make([]*types.Header, 0, amount)
	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
			result = append(result, header)
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	go func() {
		time.Sleep(time.Millisecond)
		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	}()
	return nil
}
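
// Working through the index arithmetic above (an explanatory note with
// assumed values, not original commentary): hashes is ordered head->genesis,
// so block number origin lives at index len(hashes)-1-origin, and each loop
// step moves skip+1 blocks towards the head. For len(hashes) = 10 (blocks
// 0..9), origin = 2, amount = 3, skip = 1:
//
//	i = 0 -> index 10-2-1-0 = 7 -> block 2
//	i = 1 -> index 10-2-1-2 = 5 -> block 4
//	i = 2 -> index 10-2-1-4 = 3 -> block 6
//
// i.e. the delivered batch contains blocks 2, 4 and 6.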

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	blocks := dlp.dl.peerBlocks[dlp.id]
	signlist := dlp.dl.signatureLists[dlp.id]

	transactions := make([][]*types.Transaction, 0, len(hashes))
	uncles := make([][]*types.Header, 0, len(hashes))
	signatureLists := make([][]*common.BlockConfirmSign, 0, len(hashes))

	for _, hash := range hashes {
		if block, ok := blocks[hash]; ok {
			transactions = append(transactions, block.Transactions())
			uncles = append(uncles, block.Uncles())
			signatureLists = append(signatureLists, signlist[hash])
		}
	}
	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles, signatureLists)

	return nil
}

// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	receipts := dlp.dl.peerReceipts[dlp.id]

	results := make([][]*types.Receipt, 0, len(hashes))
	for _, hash := range hashes {
		if receipt, ok := receipts[hash]; ok {
			results = append(results, receipt)
		}
	}
	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)

	return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.dl.peerMissingStates[dlp.id][hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)

	return nil
}

// RequestLatestPposStorage delivers the peer's latest and pivot headers along
// with an empty PPOS storage payload to the downloader.
func (dlp *downloadTesterPeer) RequestLatestPposStorage() error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	headers := dlp.dl.peerHeaders[dlp.id]

	latest := new(types.Header)
	pivot := new(types.Header)
	var n uint64

	for _, value := range headers {
		if value.Number.Uint64() == 229 {
			pivot = value
		}
		if value.Number.Uint64() >= n {
			latest = value
			n = value.Number.Uint64()
		}
	}
	go dlp.dl.downloader.DeliverPposStorage(dlp.id, latest, pivot, make([]byte, 0))

	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks

	if receipts < 0 {
		receipts = 1
	}
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common - fsMinFullBlocks
	}
	switch tester.downloader.mode {
	case FullSync:
		receipts = 1
	case LightSync:
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
	// Verify the state trie too for fast syncs
	/*if tester.downloader.mode == FastSync {
		pivot := uint64(0)
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}*/
}
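
// A worked example of the counter arithmetic above (an explanatory note with
// assumed numbers): for two forks of lengths {101, 101} sharing common = 51
// entries, headers = 101 + (101 - 51) = 151, and blocks likewise; under
// FullSync the receipt count is then pinned to 1 (only the genesis entry).
// This matches how the forked-sync tests below call assertOwnForkedChain
// with common+1 and []int{common + fork + 1, ...}.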
   694  
   695  // Tests that simple synchronization against a canonical chain works correctly.
   696  // In this test common ancestor lookup should be short circuited and not require
   697  // binary searching.
   698  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   699  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   700  //func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   701  //func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   702  //func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   703  //func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   704  
   705  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   706  	t.Parallel()
   707  
   708  	tester := newTester()
   709  	defer tester.terminate()
   710  
   711  	// Create a small enough block chain to download
   712  	targetBlocks := blockCacheItems - 15
   713  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   714  
   715  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, signm)
   716  
   717  	// Synchronise with the peer and make sure all relevant data was retrieved
   718  	if err := tester.sync("peer", nil, mode); err != nil {
   719  		t.Fatalf("failed to synchronise blocks: %v", err)
   720  	}
   721  	assertOwnChain(t, tester, targetBlocks+1)
   722  }
   723  
   724  // Tests that if a large batch of blocks are being downloaded, it is throttled
   725  // until the cached blocks are retrieved.
   726  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   727  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   728  //func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   729  //func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   730  //func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   731  
   732  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   733  	t.Parallel()
   734  	tester := newTester()
   735  	defer tester.terminate()
   736  
   737  	// Create a long block chain to download and the tester
   738  	targetBlocks := 8 * blockCacheItems
   739  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   740  
   741  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, signm)
   742  
   743  	// Wrap the importer to allow stepping
   744  	blocked, proceed := uint32(0), make(chan struct{})
   745  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   746  		atomic.StoreUint32(&blocked, uint32(len(results)))
   747  		<-proceed
   748  	}
   749  	// Start a synchronisation concurrently
   750  	errc := make(chan error)
   751  	go func() {
   752  		errc <- tester.sync("peer", nil, mode)
   753  	}()
   754  	// Iteratively take some blocks, always checking the retrieval count
   755  	for {
   756  		// Check the retrieval count synchronously (! reason for this ugly block)
   757  		tester.lock.RLock()
   758  		retrieved := len(tester.ownBlocks)
   759  		tester.lock.RUnlock()
   760  		if retrieved >= targetBlocks+1 {
   761  			break
   762  		}
   763  		// Wait a bit for sync to throttle itself
   764  		var cached, frozen int
   765  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   766  			time.Sleep(25 * time.Millisecond)
   767  
   768  			tester.lock.Lock()
   769  			tester.downloader.queue.lock.Lock()
   770  			cached = len(tester.downloader.queue.blockDonePool)
   771  			if mode == FastSync {
   772  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   773  					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   774  					cached = receipts
   775  					//}
   776  				}
   777  			}
   778  			frozen = int(atomic.LoadUint32(&blocked))
   779  			retrieved = len(tester.ownBlocks)
   780  			tester.downloader.queue.lock.Unlock()
   781  			tester.lock.Unlock()
   782  
   783  			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
   784  				break
   785  			}
   786  		}
   787  		// Make sure we filled up the cache, then exhaust it
   788  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   789  
   790  		tester.lock.RLock()
   791  		retrieved = len(tester.ownBlocks)
   792  		tester.lock.RUnlock()
   793  		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
   794  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   795  		}
   796  		// Permit the blocked blocks to import
   797  		if atomic.LoadUint32(&blocked) > 0 {
   798  			atomic.StoreUint32(&blocked, uint32(0))
   799  			proceed <- struct{}{}
   800  		}
   801  	}
   802  	// Check that we haven't pulled more blocks than available
   803  	assertOwnChain(t, tester, targetBlocks+1)
   804  	if err := <-errc; err != nil {
   805  		t.Fatalf("block synchronization failed: %v", err)
   806  	}
   807  }
   808  
   809  // Tests that simple synchronization against a forked chain works correctly. In
   810  // this test common ancestor lookup should *not* be short circuited, and a full
   811  // binary search should be executed.
   812  //func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   813  //func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   814  //func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   815  //func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   816  //func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   817  //func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   818  
   819  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   820  	t.Parallel()
   821  
   822  	tester := newTester()
   823  	defer tester.terminate()
   824  
   825  	// Create a long enough forked chain
   826  	common, fork := MaxHashFetch, 2*MaxHashFetch
   827  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, signA, signB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   828  
   829  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA, signA)
   830  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB, signB)
   831  
   832  	// Synchronise with the peer and make sure all blocks were retrieved
   833  	if err := tester.sync("fork A", nil, mode); err != nil {
   834  		t.Fatalf("failed to synchronise blocks: %v", err)
   835  	}
   836  	assertOwnChain(t, tester, common+fork+1)
   837  
   838  	// Synchronise with the second peer and make sure that fork is pulled too
   839  	if err := tester.sync("fork B", nil, mode); err != nil {
   840  		t.Fatalf("failed to synchronise blocks: %v", err)
   841  	}
   842  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   843  }
   844  
   845  // Tests that synchronising against a much shorter but much heavyer fork works
   846  // corrently and is not dropped.
   847  //func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   848  //func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   849  //func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   850  //func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   851  //func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   852  //func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   853  
   854  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   855  	t.Parallel()
   856  
   857  	tester := newTester()
   858  	defer tester.terminate()
   859  
   860  	// Create a long enough forked chain
   861  	common, fork := MaxHashFetch, 4*MaxHashFetch
   862  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, signA, signB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   863  
   864  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA, signA)
   865  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB, signB)
   866  
   867  	// Synchronise with the peer and make sure all blocks were retrieved
   868  	if err := tester.sync("light", nil, mode); err != nil {
   869  		t.Fatalf("failed to synchronise blocks: %v", err)
   870  	}
   871  	assertOwnChain(t, tester, common+fork+1)
   872  
   873  	// Synchronise with the second peer and make sure that fork is pulled too
   874  	if err := tester.sync("heavy", nil, mode); err != nil {
   875  		t.Fatalf("failed to synchronise blocks: %v", err)
   876  	}
   877  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   878  }
   879  
   880  // Tests that chain forks are contained within a certain interval of the current
   881  // chain head, ensuring that malicious peers cannot waste resources by feeding
   882  // long dead chains.
   883  //func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   884  //func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   885  //func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   886  //func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   887  //func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   888  //func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   889  
   890  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   891  	t.Parallel()
   892  
   893  	tester := newTester()
   894  	defer tester.terminate()
   895  
   896  	// Create a long enough forked chain
   897  	common, fork := 13, int(MaxForkAncestry+17)
   898  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, signA, signB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   899  
   900  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA, signA)
   901  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB, signB)
   902  
   903  	// Synchronise with the peer and make sure all blocks were retrieved
   904  	if err := tester.sync("original", nil, mode); err != nil {
   905  		t.Fatalf("failed to synchronise blocks: %v", err)
   906  	}
   907  	assertOwnChain(t, tester, common+fork+1)
   908  
   909  	// Synchronise with the second peer and ensure that the fork is rejected to being too old
   910  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   911  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   912  	}
   913  }
   914  
   915  // Tests that chain forks are contained within a certain interval of the current
   916  // chain head for short but heavy forks too. These are a bit special because they
   917  // take different ancestor lookup paths.
   918  //func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   919  //func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   920  //func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   921  //func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   922  //func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   923  //func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   924  
   925  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   926  	t.Parallel()
   927  
   928  	tester := newTester()
   929  	defer tester.terminate()
   930  
   931  	// Create a long enough forked chain
   932  	common, fork := 13, int(MaxForkAncestry+17)
   933  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, signA, signB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   934  
   935  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA, signA)
   936  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB, signB) // Root the fork below the ancestor limit
   937  
   938  	// Synchronise with the peer and make sure all blocks were retrieved
   939  	if err := tester.sync("original", nil, mode); err != nil {
   940  		t.Fatalf("failed to synchronise blocks: %v", err)
   941  	}
   942  	assertOwnChain(t, tester, common+fork+1)
   943  
   944  	// Synchronise with the second peer and ensure that the fork is rejected to being too old
   945  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   946  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   947  	}
   948  }
   949  
   950  // Tests that an inactive downloader will not accept incoming block headers and
   951  // bodies.
   952  func TestInactiveDownloader62(t *testing.T) {
   953  	t.Parallel()
   954  
   955  	tester := newTester()
   956  	defer tester.terminate()
   957  
   958  	// Check that neither block headers nor bodies are accepted
   959  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   960  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   961  	}
   962  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}, nil); err != errNoSyncActive {
   963  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   964  	}
   965  }
   966  
   967  // Tests that an inactive downloader will not accept incoming block headers,
   968  // bodies and receipts.
   969  func TestInactiveDownloader63(t *testing.T) {
   970  	t.Parallel()
   971  
   972  	tester := newTester()
   973  	defer tester.terminate()
   974  
   975  	// Check that neither block headers nor bodies are accepted
   976  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   977  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   978  	}
   979  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}, nil); err != errNoSyncActive {
   980  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   981  	}
   982  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   983  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   984  	}
   985  }
   986  
   987  // Tests that a canceled download wipes all previously accumulated state.
   988  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   989  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   990  //func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   991  //func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   992  //func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   993  //func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   994  
   995  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   996  	t.Parallel()
   997  
   998  	tester := newTester()
   999  	defer tester.terminate()
  1000  
  1001  	// Create a small enough block chain to download and the tester
  1002  	targetBlocks := blockCacheItems - 15
  1003  	if targetBlocks >= MaxHashFetch {
  1004  		targetBlocks = MaxHashFetch - 15
  1005  	}
  1006  	if targetBlocks >= MaxHeaderFetch {
  1007  		targetBlocks = MaxHeaderFetch - 15
  1008  	}
  1009  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1010  
  1011  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, signm)
  1012  
  1013  	// Make sure canceling works with a pristine downloader
  1014  	tester.downloader.Cancel()
  1015  	if !tester.downloader.queue.Idle() {
  1016  		t.Errorf("download queue not idle")
  1017  	}
  1018  	// Synchronise with the peer, but cancel afterwards
  1019  	if err := tester.sync("peer", nil, mode); err != nil {
  1020  		t.Fatalf("failed to synchronise blocks: %v", err)
  1021  	}
  1022  	tester.downloader.Cancel()
  1023  	if !tester.downloader.queue.Idle() {
  1024  		t.Errorf("download queue not idle")
  1025  	}
  1026  }
  1027  
  1028  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
  1029  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
  1030  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
  1031  //func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
  1032  //func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
  1033  //func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
  1034  //func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
  1035  
  1036  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
  1037  	t.Parallel()
  1038  
  1039  	tester := newTester()
  1040  	defer tester.terminate()
  1041  
  1042  	// Create various peers with various parts of the chain
  1043  	targetPeers := 8
  1044  	targetBlocks := targetPeers*blockCacheItems - 15
  1045  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1046  
  1047  	for i := 0; i < targetPeers; i++ {
  1048  		id := fmt.Sprintf("peer #%d", i)
  1049  		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts, signm)
  1050  	}
  1051  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1052  		t.Fatalf("failed to synchronise blocks: %v", err)
  1053  	}
  1054  	assertOwnChain(t, tester, targetBlocks+1)
  1055  }
  1056  
  1057  // Tests that synchronisations behave well in multi-version protocol environments
  1058  // and not wreak havoc on other nodes in the network.
  1059  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1060  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1061  //func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1062  //func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1063  //func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1064  //func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1065  
  1066  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1067  	t.Parallel()
  1068  
  1069  	tester := newTester()
  1070  	defer tester.terminate()
  1071  
  1072  	// Create a small enough block chain to download
  1073  	targetBlocks := blockCacheItems - 15
  1074  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1075  
  1076  	// Create peers of every type
  1077  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil, signm)
  1078  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts, signm)
  1079  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts, signm)
  1080  
  1081  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1082  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1083  		t.Fatalf("failed to synchronise blocks: %v", err)
  1084  	}
  1085  	assertOwnChain(t, tester, targetBlocks+1)
  1086  
  1087  	// Check that no peers have been dropped off
  1088  	for _, version := range []int{62, 63, 64} {
  1089  		peer := fmt.Sprintf("peer %d", version)
  1090  		if _, ok := tester.peerHashes[peer]; !ok {
  1091  			t.Errorf("%s dropped", peer)
  1092  		}
  1093  	}
  1094  }
  1095  
  1096  // Tests that if a block is empty (e.g. header only), no body request should be
  1097  // made, and instead the header should be assembled into a whole block in itself.
  1098  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1099  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1100  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1101  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1102  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1103  //func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1104  
  1105  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1106  	/*t.Parallel()
  1107  
  1108  	tester := newTester()
  1109  	defer tester.terminate()
  1110  
  1111  	// Create a block chain to download
  1112  	targetBlocks := 2*blockCacheItems - 15
  1113  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1114  
  1115  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, signm)
  1116  
  1117  	// Instrument the downloader to signal body requests
  1118  	bodiesHave, receiptsHave := int32(0), int32(0)
  1119  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1120  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1121  	}
  1122  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1123  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1124  	}
  1125  	// Synchronise with the peer and make sure all blocks were retrieved
  1126  	if err := tester.sync("peer", nil, mode); err != nil {
  1127  		t.Fatalf("failed to synchronise blocks: %v", err)
  1128  	}
  1129  	assertOwnChain(t, tester, targetBlocks+1)
  1130  
  1131  	// Validate the number of block bodies that should have been requested
  1132  	bodiesNeeded, receiptsNeeded := 0, 0
  1133  	for _, block := range blocks {
  1134  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1135  			bodiesNeeded++
  1136  		}
  1137  	}
  1138  	for _, receipt := range receipts {
  1139  		if mode == FastSync && len(receipt) > 0 {
  1140  			receiptsNeeded++
  1141  		}
  1142  	}
  1143  	if int(bodiesHave) != bodiesNeeded {
  1144  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1145  	}
  1146  	if int(receiptsHave) != receiptsNeeded {
  1147  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1148  	}*/
  1149  }
  1150  
  1151  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1152  // stalling the downloader by feeding gapped header chains.
  1153  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1154  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1155  //func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1156  //func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1157  //func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1158  //func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1159  
  1160  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1161  	t.Parallel()
  1162  
  1163  	tester := newTester()
  1164  	defer tester.terminate()
  1165  
  1166  	// Create a small enough block chain to download
  1167  	targetBlocks := blockCacheItems - 15
  1168  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1169  
  1170  	// Attempt a full sync with an attacker feeding gapped headers
  1171  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts, signm)
  1172  	missing := targetBlocks / 2
  1173  	delete(tester.peerHeaders["attack"], hashes[missing])
  1174  
  1175  	if err := tester.sync("attack", nil, mode); err == nil {
  1176  		t.Fatalf("succeeded attacker synchronisation")
  1177  	}
  1178  	// Synchronise with the valid peer and make sure sync succeeds
  1179  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, signm)
  1180  	if err := tester.sync("valid", nil, mode); err != nil {
  1181  		t.Fatalf("failed to synchronise blocks: %v", err)
  1182  	}
  1183  	assertOwnChain(t, tester, targetBlocks+1)
  1184  }
  1185  
  1186  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1187  // detects the invalid numbering.
  1188  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1189  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1190  //func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1191  //func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1192  //func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1193  //func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1194  
  1195  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1196  	t.Parallel()
  1197  
  1198  	tester := newTester()
  1199  	defer tester.terminate()
  1200  
  1201  	// Create a small enough block chain to download
  1202  	targetBlocks := blockCacheItems - 15
  1203  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1204  
  1205  	// Attempt a full sync with an attacker feeding shifted headers
  1206  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts, signm)
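        	// The hash list is ordered head-first with the genesis hash last, so
        	// hashes[len(hashes)-2] is block #1: dropping it shifts the attacker's first
        	// response by one, which the queue must detect as invalid numbering.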
  1207  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1208  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1209  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1210  
  1211  	if err := tester.sync("attack", nil, mode); err == nil {
  1212  		t.Fatalf("succeeded attacker synchronisation")
  1213  	}
  1214  	// Synchronise with the valid peer and make sure sync succeeds
  1215  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, signm)
  1216  	if err := tester.sync("valid", nil, mode); err != nil {
  1217  		t.Fatalf("failed to synchronise blocks: %v", err)
  1218  	}
  1219  	assertOwnChain(t, tester, targetBlocks+1)
  1220  }
  1221  
  1222  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1223  // for various failure scenarios. Afterwards a full sync is attempted to make
  1224  // sure no state was corrupted.
  1225  //func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1226  //func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1227  //func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1228  
  1229  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1230  	t.Parallel()
  1231  
  1232  	tester := newTester()
  1233  	defer tester.terminate()
  1234  
  1235  	// Create a small enough block chain to download
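        	// Size the chain so that the staged attacks below (at one and at three
        	// safety-net depths) and the fast sync pivot region all fit with headroom.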
  1236  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1237  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1238  
  1239  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1240  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1241  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts, signm)
  1242  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
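        	// Place the gap just beyond the first safety-net window: after the failed
        	// sync, the rollback should leave at most MaxHeaderFetch headers in place.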
  1243  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1244  
  1245  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1246  		t.Fatalf("succeeded fast attacker synchronisation")
  1247  	}
  1248  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1249  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1250  	}
  1251  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1252  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1253  	// rolled back, and also the pivot point being reverted to a non-block status.
  1254  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts, signm)
  1255  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1256  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1257  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1258  
  1259  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1260  		t.Fatalf("succeeded block attacker synchronisation")
  1261  	}
  1262  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1263  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1264  	}
  1265  	if mode == FastSync {
  1266  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1267  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1268  		}
  1269  	}
  1270  	// Attempt to sync with an attacker that withholds promised blocks after the
  1271  	// fast sync pivot point. This could be an attempt to leave the node with a
  1272  	// bad but already imported pivot block.
  1273  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts, signm)
  1274  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1275  
  1276  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1277  		for i := missing; i <= len(hashes); i++ {
  1278  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1279  		}
  1280  		tester.downloader.syncInitHook = nil
  1281  	}
  1282  
  1283  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1284  		t.Fatalf("succeeded withholding attacker synchronisation")
  1285  	}
  1286  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1287  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1288  	}
  1289  	if mode == FastSync {
  1290  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1291  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1292  		}
  1293  	}
  1294  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1295  	// rollback should also disable fast syncing for this process, verify that we
  1296  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1297  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1298  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, signm)
  1299  	if err := tester.sync("valid", nil, mode); err != nil {
  1300  		t.Fatalf("failed to synchronise blocks: %v", err)
  1301  	}
  1302  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1303  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1304  	}
  1305  	if mode != LightSync {
  1306  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1307  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1308  		}
  1309  	}
  1310  }
  1311  
  1312  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1313  // afterwards by not sending any useful hashes.
  1314  //func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1315  //func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1316  //func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1317  //func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1318  //func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1319  //func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1320  
  1321  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1322  	t.Parallel()
  1323  
  1324  	tester := newTester()
  1325  	defer tester.terminate()
  1326  
  1327  	hashes, headers, blocks, receipts, signm := tester.makeChain(230, 0, tester.genesis, nil, false)
  1328  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts, signm)
  1329  
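        	// Claim a total difficulty of 1,000,000, far beyond what the 230-block chain
        	// can back up; with nothing useful to serve, the peer must be reported as
        	// stalling rather than be allowed to starve the downloader indefinitely.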
  1330  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1331  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1332  	}
  1333  }
  1334  
  1335  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1336  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1337  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1338  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1339  
  1340  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1341  	t.Parallel()
  1342  
  1343  	// Define the disconnection requirement for individual hash fetch errors
  1344  	tests := []struct {
  1345  		result error
  1346  		drop   bool
  1347  	}{
  1348  		{nil, false},                        // Sync succeeded, all is well
  1349  		{errBusy, false},                    // Sync is already in progress, no problem
  1350  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1351  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1352  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1353  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1354  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1355  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1356  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1357  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1358  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1359  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1360  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1361  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1362  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1363  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1364  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1365  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1366  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1367  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1368  	}
  1369  	// Run the tests and check disconnection status
  1370  	tester := newTester()
  1371  	defer tester.terminate()
  1372  
  1373  	for i, tt := range tests {
  1374  		// Register a new peer and ensure its presence
  1375  		id := fmt.Sprintf("test %d", i)
  1376  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil, nil); err != nil {
  1377  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1378  		}
  1379  		if _, ok := tester.peerHashes[id]; !ok {
  1380  			t.Fatalf("test %d: registered peer not found", i)
  1381  		}
  1382  		// Simulate a synchronisation and check the required result
  1383  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
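        		// synchroniseMock short-circuits the real sync loop, so Synchronise below
        		// returns tt.result immediately; only the drop decision is under test.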
  1384  
  1385  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1386  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1387  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1388  		}
  1389  	}
  1390  }
  1391  
  1392  // Tests that synchronisation progress (origin block number, current block number
  1393  // and highest block number) is tracked and updated correctly.
  1394  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1395  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1396  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1397  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1398  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1399  //func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1400  
  1401  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1402  	t.Parallel()
  1403  
  1404  	tester := newTester()
  1405  	defer tester.terminate()
  1406  
  1407  	// Create a small enough block chain to download
  1408  	targetBlocks := blockCacheItems - 15
  1409  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1410  
  1411  	// Set a sync init hook to catch progress changes
  1412  	starting := make(chan struct{})
  1413  	progress := make(chan struct{})
  1414  
  1415  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1416  		starting <- struct{}{}
  1417  		<-progress
  1418  	}
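        	// The hook gives the test a two-step handshake: the downloader blocks right
        	// after origin/latest are determined, the test asserts the intermediate
        	// progress, and a send on the progress channel lets the sync resume.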
  1419  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1420  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1421  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1422  	}
  1423  	// Synchronise half the blocks and check initial progress
  1424  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts, signm)
  1425  	pending := new(sync.WaitGroup)
  1426  	pending.Add(1)
  1427  
  1428  	go func() {
  1429  		defer pending.Done()
  1430  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1431  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1432  		}
  1433  	}()
  1434  	<-starting
  1435  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1436  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1437  	}
  1438  	progress <- struct{}{}
  1439  	pending.Wait()
  1440  
  1441  	// Synchronise all the blocks and check continuation progress
  1442  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts, signm)
  1443  	pending.Add(1)
  1444  
  1445  	go func() {
  1446  		defer pending.Done()
  1447  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1448  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1449  		}
  1450  	}()
  1451  	<-starting
  1452  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1453  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1454  	}
  1455  	progress <- struct{}{}
  1456  	pending.Wait()
  1457  
  1458  	// Check final progress after successful sync
  1459  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1460  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1461  	}
  1462  }
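
        // For context, a minimal sketch of how a caller outside these tests might poll
        // the same Progress API (logProgress is a hypothetical helper, not part of this
        // file; the field names match those asserted on above):
        //
        //	func logProgress(d *Downloader) {
        //		p := d.Progress()
        //		fmt.Printf("sync %d/%d (started at %d)\n", p.CurrentBlock, p.HighestBlock, p.StartingBlock)
        //	}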
  1463  
  1464  // Tests that synchronisation progress (origin block number and highest block
  1465  // number) is tracked and updated correctly in case of a fork (or a manual head
  1466  // reversal).
  1467  //func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1468  //func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1469  //func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1470  //func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1471  //func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1472  //func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1473  
  1474  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1475  	t.Parallel()
  1476  
  1477  	tester := newTester()
  1478  	defer tester.terminate()
  1479  
  1480  	// Create a forked chain to simulate an origin reversal
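        	// Note: the local common below shadows the imported common package for the
        	// rest of this function; only the numeric fork point is needed here.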
  1481  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1482  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB, signA, signB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1483  
  1484  	// Set a sync init hook to catch progress changes
  1485  	starting := make(chan struct{})
  1486  	progress := make(chan struct{})
  1487  
  1488  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1489  		starting <- struct{}{}
  1490  		<-progress
  1491  	}
  1492  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1493  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1494  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1495  	}
  1496  	// Synchronise with one of the forks and check progress
  1497  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA, signA)
  1498  	pending := new(sync.WaitGroup)
  1499  	pending.Add(1)
  1500  
  1501  	go func() {
  1502  		defer pending.Done()
  1503  		if err := tester.sync("fork A", nil, mode); err != nil {
  1504  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1505  		}
  1506  	}()
  1507  	<-starting
  1508  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1509  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1510  	}
  1511  	progress <- struct{}{}
  1512  	pending.Wait()
  1513  
  1514  	// Simulate a successful sync above the fork
  1515  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1516  
  1517  	// Synchronise with the second fork and check progress resets
  1518  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB, signB)
  1519  	pending.Add(1)
  1520  
  1521  	go func() {
  1522  		defer pending.Done()
  1523  		if err := tester.sync("fork B", nil, mode); err != nil && err != errNoNeedSync {
  1524  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1525  		}
  1526  	}()
  1527  	<-starting
  1528  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1529  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1530  	}
  1531  	progress <- struct{}{}
  1532  	pending.Wait()
  1533  
  1534  	// Check final progress after successful sync
  1535  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1536  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1537  	}
  1538  }
  1539  
  1540  // Tests that if synchronisation is aborted due to some failure, then the progress
  1541  // origin is not updated in the next sync cycle, as it should be considered the
  1542  // continuation of the previous sync and not a new instance.
  1543  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1544  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1545  //func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1546  //func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1547  //func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1548  //func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1549  
  1550  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1551  	t.Parallel()
  1552  
  1553  	tester := newTester()
  1554  	defer tester.terminate()
  1555  
  1556  	// Create a small enough block chain to download
  1557  	targetBlocks := blockCacheItems - 15
  1558  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1559  
  1560  	// Set a sync init hook to catch progress changes
  1561  	starting := make(chan struct{})
  1562  	progress := make(chan struct{})
  1563  
  1564  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1565  		starting <- struct{}{}
  1566  		<-progress
  1567  	}
  1568  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1569  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1570  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1571  	}
  1572  	// Attempt a full sync with a faulty peer
  1573  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts, signm)
  1574  	missing := targetBlocks / 2
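        	// Drop the mid-chain entries so the sync fails halfway through; the origin
        	// recorded by this failed attempt must carry over into the next cycle.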
  1575  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1576  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1577  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1578  
  1579  	pending := new(sync.WaitGroup)
  1580  	pending.Add(1)
  1581  
  1582  	go func() {
  1583  		defer pending.Done()
  1584  		if err := tester.sync("faulty", nil, mode); err == nil {
  1585  			panic("succeeded faulty synchronisation")
  1586  		}
  1587  	}()
  1588  	<-starting
  1589  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1590  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1591  	}
  1592  	progress <- struct{}{}
  1593  	pending.Wait()
  1594  
  1595  	// Synchronise with a good peer and check that the progress origin remains the same after the failure
  1596  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts, signm)
  1597  	pending.Add(1)
  1598  
  1599  	go func() {
  1600  		defer pending.Done()
  1601  		if err := tester.sync("valid", nil, mode); err != nil {
  1602  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1603  		}
  1604  	}()
  1605  	<-starting
  1606  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1607  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1608  	}
  1609  	progress <- struct{}{}
  1610  	pending.Wait()
  1611  
  1612  	// Check final progress after successful sync
  1613  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1614  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1615  	}
  1616  }
  1617  
  1618  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1619  // the progress height is successfully reduced at the next sync invocation.
  1620  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1621  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1622  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1623  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1624  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1625  //func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1626  
  1627  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1628  	/*t.Parallel()
  1629  
  1630  	tester := newTester()
  1631  	defer tester.terminate()
  1632  
  1633  	// Create a small block chain
  1634  	targetBlocks := blockCacheItems - 15
  1635  	hashes, headers, blocks, receipts, signm := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1636  
  1637  	// Set a sync init hook to catch progress changes
  1638  	starting := make(chan struct{})
  1639  	progress := make(chan struct{})
  1640  
  1641  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1642  		starting <- struct{}{}
  1643  		<-progress
  1644  	}
  1645  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1646  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1647  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1648  	}
  1649  	// Create and sync with an attacker that promises a higher chain than available
  1650  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts, signm)
  1651  	for i := 1; i < 3; i++ {
  1652  		delete(tester.peerHeaders["attack"], hashes[i])
  1653  		delete(tester.peerBlocks["attack"], hashes[i])
  1654  		delete(tester.peerReceipts["attack"], hashes[i])
  1655  	}
  1656  
  1657  	pending := new(sync.WaitGroup)
  1658  	pending.Add(1)
  1659  
  1660  	go func() {
  1661  		defer pending.Done()
  1662  		if err := tester.sync("attack", nil, mode); err == nil {
  1663  			panic("succeeded attacker synchronisation")
  1664  		}
  1665  	}()
  1666  	<-starting
  1667  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1668  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1669  	}
  1670  	progress <- struct{}{}
  1671  	pending.Wait()
  1672  
  1673  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1674  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts, signm)
  1675  	pending.Add(1)
  1676  
  1677  	go func() {
  1678  		defer pending.Done()
  1679  		if err := tester.sync("valid", nil, mode); err != nil {
  1680  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1681  		}
  1682  	}()
  1683  	<-starting
  1684  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1685  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1686  	}
  1687  	progress <- struct{}{}
  1688  	pending.Wait()
  1689  
  1690  	// Check final progress after successful sync
  1691  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1692  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1693  	}*/
  1694  }
  1695  
  1696  // This test reproduces an issue where unexpected deliveries would
  1697  // block indefinitely if they arrived at the right time.
  1698  // We use data-driven subtests so the cases run in parallel with each other, but
  1699  // not with the other tests, avoiding intermittent failures.
  1700  func TestDeliverHeadersHang(t *testing.T) {
  1701  	testCases := []struct {
  1702  		protocol int
  1703  		syncMode SyncMode
  1704  	}{
  1705  		{62, FullSync},
  1706  		{63, FullSync},
  1707  		{63, FastSync},
  1708  		{64, FullSync},
  1709  		{64, FastSync},
  1710  		//{64, LightSync},
  1711  	}
  1712  	for _, tc := range testCases {
  1713  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1714  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1715  		})
  1716  	}
  1717  }
  1718  
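        // floodingTestPeer wraps a regular test peer, delegating every request except
        // RequestHeadersByNumber, which additionally floods the downloader with a
        // burst of unsolicited header deliveries from fake peers.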
  1719  type floodingTestPeer struct {
  1720  	peer   Peer
  1721  	tester *downloadTester
  1722  	pend   sync.WaitGroup
  1723  }
  1724  
  1725  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1726  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1727  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1728  }
  1729  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1730  	return ftp.peer.RequestBodies(hashes)
  1731  }
  1732  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1733  	return ftp.peer.RequestReceipts(hashes)
  1734  }
  1735  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1736  	return ftp.peer.RequestNodeData(hashes)
  1737  }
  1738  func (ftp *floodingTestPeer) RequestLatestPposStorage() error {
  1739  	return ftp.peer.RequestLatestPposStorage()
  1740  }
  1741  
  1742  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1743  	deliveriesDone := make(chan struct{}, 500)
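        	// Fire 500 concurrent bogus deliveries, each under a distinct fake peer id
        	// so the downloader treats them as independent, unexpected responses.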
  1744  	for i := 0; i < cap(deliveriesDone); i++ {
  1745  		peer := fmt.Sprintf("fake-peer%d", i)
  1746  		ftp.pend.Add(1)
  1747  
  1748  		go func() {
  1749  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1750  			deliveriesDone <- struct{}{}
  1751  			ftp.pend.Done()
  1752  		}()
  1753  	}
  1754  	// Deliver the actual requested headers.
  1755  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1756  	// None of the extra deliveries should block.
  1757  	timeout := time.After(60 * time.Second)
  1758  	for i := 0; i < cap(deliveriesDone); i++ {
  1759  		select {
  1760  		case <-deliveriesDone:
  1761  		case <-timeout:
  1762  			panic("blocked")
  1763  		}
  1764  	}
  1765  	return nil
  1766  }
  1767  
  1768  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1769  	t.Parallel()
  1770  
  1771  	master := newTester()
  1772  	defer master.terminate()
  1773  
  1774  	hashes, headers, blocks, receipts, signm := master.makeChain(230, 0, master.genesis, nil, false)
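        	// Repeat the sync 20 times: the hang is timing dependent, so repetition
        	// raises the odds of hitting the problematic delivery window.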
  1775  	for i := 0; i < 20; i++ {
  1776  		tester := newTester()
  1777  		tester.peerDb = master.peerDb
  1778  
  1779  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts, signm)
  1780  		// Whenever the downloader requests headers, flood it with
  1781  		// a lot of unrequested header deliveries.
  1782  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1783  			peer:   tester.downloader.peers.peers["peer"].peer,
  1784  			tester: tester,
  1785  		}
  1786  		if err := tester.sync("peer", nil, mode); err != nil {
  1787  			t.Errorf("test %d: sync failed: %v", i, err)
  1788  		}
  1789  		tester.terminate()
  1790  
  1791  		// Flush all goroutines to prevent messing with subsequent tests
  1792  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1793  	}
  1794  }