github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/intprotocol/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	//"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/intfoundation/intchain/common"
    29  	"github.com/intfoundation/intchain/core"
    30  	"github.com/intfoundation/intchain/core/rawdb"
    31  	"github.com/intfoundation/intchain/core/types"
    32  	"github.com/intfoundation/intchain/crypto"
    33  	"github.com/intfoundation/intchain/event"
    34  	"github.com/intfoundation/intchain/intdb"
    35  	"github.com/intfoundation/intchain/params"
    36  	"github.com/intfoundation/intchain/trie"
    37  )
    38  
    39  var (
    40  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    41  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    42  )
    43  
    44  // Reduce some of the parameters to make the tester faster.
    45  func init() {
    46  	MaxForkAncestry = uint64(10000)
    47  	blockCacheItems = 1024
    48  	fsHeaderContCheck = 500 * time.Millisecond
    49  }
    50  
    51  // downloadTester is a test simulator for mocking out a local blockchain.
    52  type downloadTester struct {
    53  	downloader *Downloader
    54  
    55  	genesis *types.Block   // Genesis block used by the tester and peers
    56  	stateDb intdb.Database // Database used by the tester for syncing from peers
    57  	peerDb  intdb.Database // Database of the peers containing all data
    58  
    59  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    60  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    61  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    62  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    63  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    64  
    65  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    66  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    67  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    68  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    69  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    70  
    71  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    72  
    73  	lock sync.RWMutex
    74  }
    75  
    76  // newTester creates a new downloader test mocker.
    77  func newTester() *downloadTester {
    78  	testdb := rawdb.NewMemoryDatabase()
    79  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    80  
    81  	tester := &downloadTester{
    82  		genesis:           genesis,
    83  		peerDb:            testdb,
    84  		ownHashes:         []common.Hash{genesis.Hash()},
    85  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    86  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    87  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    88  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    89  		peerHashes:        make(map[string][]common.Hash),
    90  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    91  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    92  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    93  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    94  		peerMissingStates: make(map[string]map[common.Hash]bool),
    95  	}
    96  	tester.stateDb = rawdb.NewMemoryDatabase()
    97  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    98  
    99  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer, nil)
   100  
   101  	return tester
   102  }
   103  
   104  // makeChain creates a chain of n blocks starting at and including parent.
   105  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   106  // contains a transaction and every 5th an uncle to allow testing correct block
   107  // reassembly.
   108  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   109  	// Generate the block chain
   110  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, nil, dl.peerDb, n, func(i int, block *core.BlockGen) {
   111  		block.SetCoinbase(common.Address{seed})
   112  
   113  		// If a heavy chain is requested, delay blocks to raise difficulty
   114  		if heavy {
   115  			block.OffsetTime(-1)
   116  		}
   117  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   118  		if parent == dl.genesis && i%3 == 0 {
   119  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   120  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   121  			if err != nil {
   122  				panic(err)
   123  			}
   124  			block.AddTx(tx)
   125  		}
   126  		// If the block number is a multiple of 5, add a bonus uncle to the block
   127  		if i > 0 && i%5 == 0 {
   128  			block.AddUncle(&types.Header{
   129  				ParentHash: block.PrevBlock(i - 1).Hash(),
   130  				Number:     big.NewInt(block.Number().Int64() - 1),
   131  			})
   132  		}
   133  	})
   134  	// Convert the block-chain into a hash-chain and header/block maps
   135  	hashes := make([]common.Hash, n+1)
   136  	hashes[len(hashes)-1] = parent.Hash()
   137  
   138  	headerm := make(map[common.Hash]*types.Header, n+1)
   139  	headerm[parent.Hash()] = parent.Header()
   140  
   141  	blockm := make(map[common.Hash]*types.Block, n+1)
   142  	blockm[parent.Hash()] = parent
   143  
   144  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   145  	receiptm[parent.Hash()] = parentReceipts
   146  
   147  	for i, b := range blocks {
   148  		hashes[len(hashes)-i-2] = b.Hash()
   149  		headerm[b.Hash()] = b.Header()
   150  		blockm[b.Hash()] = b
   151  		receiptm[b.Hash()] = receipts[i]
   152  	}
   153  	return hashes, headerm, blockm, receiptm
   154  }
   155  
   156  // makeChainFork creates two chains of length n, such that h1[:f] and
   157  // h2[:f] are different but have a common suffix of length n-f.
   158  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   159  	// Create the common suffix
   160  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   161  
   162  	// Create the forks, making the second heavier if unbalanced forks were requested
   163  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   164  	hashes1 = append(hashes1, hashes[1:]...)
   165  
   166  	heavy := false
   167  	if !balanced {
   168  		heavy = true
   169  	}
   170  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   171  	hashes2 = append(hashes2, hashes[1:]...)
   172  
   173  	for hash, header := range headers {
   174  		headers1[hash] = header
   175  		headers2[hash] = header
   176  	}
   177  	for hash, block := range blocks {
   178  		blocks1[hash] = block
   179  		blocks2[hash] = block
   180  	}
   181  	for hash, receipt := range receipts {
   182  		receipts1[hash] = receipt
   183  		receipts2[hash] = receipt
   184  	}
   185  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   186  }
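
// Worked example (illustrative): for n=5, f=2 each returned hash chain has
// length n+1; the first f entries diverge per fork, and everything from
// index f onward is the shared suffix built from the common chain:
//
//	hashes1: [f1b f1a s3 s2 s1 parent]
//	hashes2: [f2b f2a s3 s2 s1 parent]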
   187  
   188  // terminate aborts any operations on the embedded downloader and releases all
   189  // held resources.
   190  func (dl *downloadTester) terminate() {
   191  	dl.downloader.Terminate()
   192  }
   193  
   194  // sync starts synchronizing with a remote peer, blocking until it completes.
   195  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   196  	dl.lock.RLock()
   197  	hash := dl.peerHashes[id][0]
   198  	// If no particular TD was requested, load from the peer's blockchain
   199  	if td == nil {
   200  		td = big.NewInt(1)
   201  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   202  			td = diff
   203  		}
   204  	}
   205  	dl.lock.RUnlock()
   206  
   207  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   208  	err := dl.downloader.synchronise(id, hash, td, mode)
   209  	select {
   210  	case <-dl.downloader.cancelCh:
   211  		// Ok, downloader fully cancelled after sync cycle
   212  	default:
   213  		// Downloader is still accepting packets, which can tie a peer up
   214  		panic("downloader active post sync cycle") // panic will be caught by tester
   215  	}
   216  	return err
   217  }
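
// A representative call sequence, taken verbatim from the disabled tests
// below:
//
//	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
//	if err := tester.sync("peer", nil, mode); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}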
   218  
   219  // HasHeader checks if a header is present in the tester's canonical chain.
   220  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   221  	return dl.GetHeaderByHash(hash) != nil
   222  }
   223  
   224  // HasBlock checks if a block is present in the tester's canonical chain.
   225  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   226  	return dl.GetBlockByHash(hash) != nil
   227  }
   228  
   229  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   230  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   231  	dl.lock.RLock()
   232  	defer dl.lock.RUnlock()
   233  
   234  	return dl.ownHeaders[hash]
   235  }
   236  
   237  // GetBlockByHash retrieves a block from the tester's canonical chain.
   238  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   239  	dl.lock.RLock()
   240  	defer dl.lock.RUnlock()
   241  
   242  	return dl.ownBlocks[hash]
   243  }
   244  
   245  // CurrentHeader retrieves the current head header from the canonical chain.
   246  func (dl *downloadTester) CurrentHeader() *types.Header {
   247  	dl.lock.RLock()
   248  	defer dl.lock.RUnlock()
   249  
   250  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   251  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   252  			return header
   253  		}
   254  	}
   255  	return dl.genesis.Header()
   256  }
   257  
   258  // CurrentBlock retrieves the current head block from the canonical chain.
   259  func (dl *downloadTester) CurrentBlock() *types.Block {
   260  	dl.lock.RLock()
   261  	defer dl.lock.RUnlock()
   262  
   263  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   264  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   265  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   266  				return block
   267  			}
   268  		}
   269  	}
   270  	return dl.genesis
   271  }
   272  
   273  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   274  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   275  	dl.lock.RLock()
   276  	defer dl.lock.RUnlock()
   277  
   278  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   279  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   280  			return block
   281  		}
   282  	}
   283  	return dl.genesis
   284  }
   285  
   286  // FastSyncCommitHead manually sets the head block to a given hash.
   287  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   288  	// For now only check that the state trie is correct
   289  	if block := dl.GetBlockByHash(hash); block != nil {
   290  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
   291  		return err
   292  	}
   293  	return fmt.Errorf("non-existent block: %x", hash[:4])
   294  }
   295  
   296  // GetTd retrieves the block's total difficulty from the canonical chain.
   297  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   298  	dl.lock.RLock()
   299  	defer dl.lock.RUnlock()
   300  
   301  	return dl.ownChainTd[hash]
   302  }
   303  
   304  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   305  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   306  	dl.lock.Lock()
   307  	defer dl.lock.Unlock()
   308  
   309  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   310  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   311  		return 0, errors.New("unknown parent")
   312  	}
   313  	for i := 1; i < len(headers); i++ {
   314  		if headers[i].ParentHash != headers[i-1].Hash() {
   315  			return i, errors.New("unknown parent")
   316  		}
   317  	}
   318  	// Do a full insert if pre-checks passed
   319  	for i, header := range headers {
   320  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   321  			continue
   322  		}
   323  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   324  			return i, errors.New("unknown parent")
   325  		}
   326  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   327  		dl.ownHeaders[header.Hash()] = header
   328  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   329  	}
   330  	return len(headers), nil
   331  }
   332  
   333  // InsertChain injects a new batch of blocks into the simulated chain.
   334  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   335  	dl.lock.Lock()
   336  	defer dl.lock.Unlock()
   337  
   338  	for i, block := range blocks {
   339  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   340  			return i, errors.New("unknown parent")
   341  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   342  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   343  		}
   344  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   345  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   346  			dl.ownHeaders[block.Hash()] = block.Header()
   347  		}
   348  		dl.ownBlocks[block.Hash()] = block
   349  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   350  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   351  	}
   352  	return len(blocks), nil
   353  }
   354  
   355  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   356  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   357  	dl.lock.Lock()
   358  	defer dl.lock.Unlock()
   359  
   360  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   361  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   362  			return i, errors.New("unknown owner")
   363  		}
   364  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   365  			return i, errors.New("unknown parent")
   366  		}
   367  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   368  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   369  	}
   370  	return len(blocks), nil
   371  }
   372  
   373  // Rollback removes some recently added elements from the chain.
   374  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   375  	dl.lock.Lock()
   376  	defer dl.lock.Unlock()
   377  
   378  	for i := len(hashes) - 1; i >= 0; i-- {
   379  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   380  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   381  		}
   382  		delete(dl.ownChainTd, hashes[i])
   383  		delete(dl.ownHeaders, hashes[i])
   384  		delete(dl.ownReceipts, hashes[i])
   385  		delete(dl.ownBlocks, hashes[i])
   386  	}
   387  }
   388  
   389  // newPeer registers a new block download source into the downloader.
   390  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   391  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   392  }
   393  
   394  // newSlowPeer registers a new block download source into the downloader, with a
   395  // specific delay time on processing the network packets sent to it, simulating
   396  // potentially slow network IO.
   397  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   398  	dl.lock.Lock()
   399  	defer dl.lock.Unlock()
   400  
   401  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   402  	if err == nil {
   403  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   404  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   405  		copy(dl.peerHashes[id], hashes)
   406  
   407  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   408  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   409  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   410  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   411  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   412  
   413  		genesis := hashes[len(hashes)-1]
   414  		if header := headers[genesis]; header != nil {
   415  			dl.peerHeaders[id][genesis] = header
   416  			dl.peerChainTds[id][genesis] = header.Difficulty
   417  		}
   418  		if block := blocks[genesis]; block != nil {
   419  			dl.peerBlocks[id][genesis] = block
   420  			dl.peerChainTds[id][genesis] = block.Difficulty()
   421  		}
   422  
   423  		for i := len(hashes) - 2; i >= 0; i-- {
   424  			hash := hashes[i]
   425  
   426  			if header, ok := headers[hash]; ok {
   427  				dl.peerHeaders[id][hash] = header
   428  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   429  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   430  				}
   431  			}
   432  			if block, ok := blocks[hash]; ok {
   433  				dl.peerBlocks[id][hash] = block
   434  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   435  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   436  				}
   437  			}
   438  			if receipt, ok := receipts[hash]; ok {
   439  				dl.peerReceipts[id][hash] = receipt
   440  			}
   441  		}
   442  	}
   443  	return err
   444  }
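
// Hypothetical use: register a peer whose responses lag by half a second,
// exercising the downloader's timeout and throttling paths.
//
//	err := tester.newSlowPeer("slow", 63, hashes, headers, blocks, receipts, 500*time.Millisecond)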
   445  
   446  // dropPeer simulates a hard peer removal from the connection pool.
   447  func (dl *downloadTester) dropPeer(id string) {
   448  	dl.lock.Lock()
   449  	defer dl.lock.Unlock()
   450  
   451  	delete(dl.peerHashes, id)
   452  	delete(dl.peerHeaders, id)
   453  	delete(dl.peerBlocks, id)
   454  	delete(dl.peerChainTds, id)
   455  
   456  	dl.downloader.UnregisterPeer(id)
   457  }
   458  
   459  type downloadTesterPeer struct {
   460  	dl    *downloadTester
   461  	id    string
   462  	delay time.Duration
   463  	lock  sync.RWMutex
   464  }
   465  
   466  // setDelay is a thread safe setter for the network delay value.
   467  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   468  	dlp.lock.Lock()
   469  	defer dlp.lock.Unlock()
   470  
   471  	dlp.delay = delay
   472  }
   473  
   474  // waitDelay is a thread safe way to sleep for the configured time.
   475  func (dlp *downloadTesterPeer) waitDelay() {
   476  	dlp.lock.RLock()
   477  	delay := dlp.delay
   478  	dlp.lock.RUnlock()
   479  
   480  	time.Sleep(delay)
   481  }
   482  
   483  // Head retrieves the peer's current head block hash and total
   484  // difficulty.
   485  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   486  	dlp.dl.lock.RLock()
   487  	defer dlp.dl.lock.RUnlock()
   488  
   489  	return dlp.dl.peerHashes[dlp.id][0], nil
   490  }
   491  
   492  // RequestHeadersByHash implements a GetBlockHeaders request over a hashed
   493  // origin for the particular peer in the download tester, retrieving a batch
   494  // of headers from that peer.
   495  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   496  	// Find the canonical number of the hash
   497  	dlp.dl.lock.RLock()
   498  	number := uint64(0)
   499  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   500  		if hash == origin {
   501  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   502  			break
   503  		}
   504  	}
   505  	dlp.dl.lock.RUnlock()
   506  
   507  	// Use the absolute header fetcher to satisfy the query
   508  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   509  }
   510  
   511  // RequestHeadersByNumber implements a GetBlockHeaders request over a numbered
   512  // origin for the particular peer in the download tester, retrieving a batch
   513  // of headers from that peer.
   514  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   515  	dlp.waitDelay()
   516  
   517  	dlp.dl.lock.RLock()
   518  	defer dlp.dl.lock.RUnlock()
   519  
   520  	// Gather the next batch of headers
   521  	hashes := dlp.dl.peerHashes[dlp.id]
   522  	headers := dlp.dl.peerHeaders[dlp.id]
   523  	result := make([]*types.Header, 0, amount)
   524  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   525  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   526  			result = append(result, header)
   527  		}
   528  	}
   529  	// Delay delivery a bit to allow attacks to unfold
   530  	go func() {
   531  		time.Sleep(time.Millisecond)
   532  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   533  	}()
   534  	return nil
   535  }
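
// Index arithmetic, illustrated: the hash chain is ordered head-first, so
// block number N sits at index len(hashes)-1-N. With origin=0, amount=3 and
// skip=0 the loop above visits the genesis, block 1 and block 2 in order.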
   536  
   537  // RequestBodies implements a getBlockBodies request for the particular peer
   538  // in the download tester, retrieving a batch of block bodies from that
   539  // peer.
   540  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   541  	dlp.waitDelay()
   542  
   543  	dlp.dl.lock.RLock()
   544  	defer dlp.dl.lock.RUnlock()
   545  
   546  	blocks := dlp.dl.peerBlocks[dlp.id]
   547  
   548  	transactions := make([][]*types.Transaction, 0, len(hashes))
   549  	uncles := make([][]*types.Header, 0, len(hashes))
   550  
   551  	for _, hash := range hashes {
   552  		if block, ok := blocks[hash]; ok {
   553  			transactions = append(transactions, block.Transactions())
   554  			uncles = append(uncles, block.Uncles())
   555  		}
   556  	}
   557  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   558  
   559  	return nil
   560  }
   561  
   562  // RequestReceipts implements a getReceipts request for the particular peer
   563  // in the download tester, retrieving a batch of block receipts from that
   564  // peer.
   565  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   566  	dlp.waitDelay()
   567  
   568  	dlp.dl.lock.RLock()
   569  	defer dlp.dl.lock.RUnlock()
   570  
   571  	receipts := dlp.dl.peerReceipts[dlp.id]
   572  
   573  	results := make([][]*types.Receipt, 0, len(hashes))
   574  	for _, hash := range hashes {
   575  		if receipt, ok := receipts[hash]; ok {
   576  			results = append(results, receipt)
   577  		}
   578  	}
   579  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   580  
   581  	return nil
   582  }
   583  
   584  // RequestNodeData implements a getNodeData request for the particular peer
   585  // in the download tester, retrieving a batch of state trie nodes from that
   586  // peer.
   587  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   588  	dlp.waitDelay()
   589  
   590  	dlp.dl.lock.RLock()
   591  	defer dlp.dl.lock.RUnlock()
   592  
   593  	results := make([][]byte, 0, len(hashes))
   594  	for _, hash := range hashes {
   595  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   596  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   597  				results = append(results, data)
   598  			}
   599  		}
   600  	}
   601  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   602  
   603  	return nil
   604  }
   605  
   606  // assertOwnChain checks if the local chain contains the correct number of items
   607  // of the various chain components.
   608  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   609  	assertOwnForkedChain(t, tester, 1, []int{length})
   610  }
   611  
   612  // assertOwnForkedChain checks if the local forked chain contains the correct
   613  // number of items of the various chain components.
   614  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   615  	// Initialize the counters for the first fork
   616  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
   617  
   618  	if receipts < 0 {
   619  		receipts = 1
   620  	}
   621  	// Update the counters for each subsequent fork
   622  	for _, length := range lengths[1:] {
   623  		headers += length - common
   624  		blocks += length - common
   625  		receipts += length - common - fsMinFullBlocks
   626  	}
   627  	switch tester.downloader.mode {
   628  	case FullSync:
   629  		receipts = 1
   630  		if hs := len(tester.ownHeaders); hs != headers {
   631  			t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   632  		}
   633  		if bs := len(tester.ownBlocks); bs != blocks {
   634  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   635  		}
   636  		if rs := len(tester.ownReceipts); rs != receipts {
   637  			t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   638  		}
   639  		// Verify the state trie too for fast syncs
   640  		/*if tester.downloader.mode == FastSync {
   641  			pivot := uint64(0)
   642  			var index int
   643  			if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   644  				index = pivot
   645  			} else {
   646  				index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   647  			}
   648  			if index > 0 {
   649  				if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   650  					t.Fatalf("state reconstruction failed: %v", err)
   651  				}
   652  			}
   653  		}*/
   654  	}
   655  }
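
// Worked example (full sync): for common=1 and lengths=[4, 4] the expected
// counters are headers = blocks = 4 + (4-1) = 7, while receipts collapses
// to 1 since full sync keeps only the genesis receipt entry.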
   656  
   657  // Tests that simple synchronization against a canonical chain works correctly.
   658  // In this test common ancestor lookup should be short circuited and not require
   659  // binary searching.
   660  //func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   661  //func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   662  //func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   663  //func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   664  //func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   665  //
   666  //func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   667  //	t.Parallel()
   668  //
   669  //	tester := newTester()
   670  //	defer tester.terminate()
   671  //
   672  //	// Create a small enough block chain to download
   673  //	targetBlocks := blockCacheItems - 15
   674  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   675  //
   676  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   677  //
   678  //	// Synchronise with the peer and make sure all relevant data was retrieved
   679  //	if err := tester.sync("peer", nil, mode); err != nil {
   680  //		t.Fatalf("failed to synchronise blocks: %v", err)
   681  //	}
   682  //	assertOwnChain(t, tester, targetBlocks+1)
   683  //}
   684  //
   685  //// Tests that if a large batch of blocks are being downloaded, it is throttled
   686  //// until the cached blocks are retrieved.
   687  //func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   688  //func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   689  //func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   690  //func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   691  //func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   692  //
   693  //func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   694  //	t.Parallel()
   695  //	tester := newTester()
   696  //	defer tester.terminate()
   697  //
   698  //	// Create a long block chain to download and the tester
   699  //	targetBlocks := 8 * blockCacheItems
   700  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   701  //
   702  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   703  //
   704  //	// Wrap the importer to allow stepping
   705  //	blocked, proceed := uint32(0), make(chan struct{})
   706  //	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   707  //		atomic.StoreUint32(&blocked, uint32(len(results)))
   708  //		<-proceed
   709  //	}
   710  //	// Start a synchronisation concurrently
   711  //	errc := make(chan error)
   712  //	go func() {
   713  //		errc <- tester.sync("peer", nil, mode)
   714  //	}()
   715  //	// Iteratively take some blocks, always checking the retrieval count
   716  //	for {
   717  //		// Check the retrieval count synchronously (the reason for this ugly block)
   718  //		tester.lock.RLock()
   719  //		retrieved := len(tester.ownBlocks)
   720  //		tester.lock.RUnlock()
   721  //		if retrieved >= targetBlocks+1 {
   722  //			break
   723  //		}
   724  //		// Wait a bit for sync to throttle itself
   725  //		var cached, frozen int
   726  //		for start := time.Now(); time.Since(start) < 3*time.Second; {
   727  //			time.Sleep(25 * time.Millisecond)
   728  //
   729  //			tester.lock.Lock()
   730  //			tester.downloader.queue.lock.Lock()
   731  //			cached = len(tester.downloader.queue.blockDonePool)
   732  //			if mode == FastSync {
   733  //				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   734  //					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   735  //					cached = receipts
   736  //					//}
   737  //				}
   738  //			}
   739  //			frozen = int(atomic.LoadUint32(&blocked))
   740  //			retrieved = len(tester.ownBlocks)
   741  //			tester.downloader.queue.lock.Unlock()
   742  //			tester.lock.Unlock()
   743  //
   744  //			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
   745  //				break
   746  //			}
   747  //		}
   748  //		// Make sure we filled up the cache, then exhaust it
   749  //		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   750  //
   751  //		tester.lock.RLock()
   752  //		retrieved = len(tester.ownBlocks)
   753  //		tester.lock.RUnlock()
   754  //		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
   755  //			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   756  //		}
   757  //		// Permit the blocked blocks to import
   758  //		if atomic.LoadUint32(&blocked) > 0 {
   759  //			atomic.StoreUint32(&blocked, uint32(0))
   760  //			proceed <- struct{}{}
   761  //		}
   762  //	}
   763  //	// Check that we haven't pulled more blocks than available
   764  //	assertOwnChain(t, tester, targetBlocks+1)
   765  //	if err := <-errc; err != nil {
   766  //		t.Fatalf("block synchronization failed: %v", err)
   767  //	}
   768  //}
   769  //
   770  //// Tests that simple synchronization against a forked chain works correctly. In
   771  //// this test common ancestor lookup should *not* be short circuited, and a full
   772  //// binary search should be executed.
   773  //func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   774  //func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   775  //func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   776  //func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   777  //func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   778  //func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   779  //
   780  //func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   781  //	t.Parallel()
   782  //
   783  //	tester := newTester()
   784  //	defer tester.terminate()
   785  //
   786  //	// Create a long enough forked chain
   787  //	common, fork := MaxHashFetch, 2*MaxHashFetch
   788  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   789  //
   790  //	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   791  //	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   792  //
   793  //	// Synchronise with the peer and make sure all blocks were retrieved
   794  //	if err := tester.sync("fork A", nil, mode); err != nil {
   795  //		t.Fatalf("failed to synchronise blocks: %v", err)
   796  //	}
   797  //	assertOwnChain(t, tester, common+fork+1)
   798  //
   799  //	// Synchronise with the second peer and make sure that fork is pulled too
   800  //	if err := tester.sync("fork B", nil, mode); err != nil {
   801  //		t.Fatalf("failed to synchronise blocks: %v", err)
   802  //	}
   803  //	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   804  //}
   805  //
   806  //// Tests that synchronising against a much shorter but much heavier fork works
   807  //// correctly and is not dropped.
   808  //func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   809  //func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   810  //func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   811  //func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   812  //func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   813  //func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   814  //
   815  //func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   816  //	t.Parallel()
   817  //
   818  //	tester := newTester()
   819  //	defer tester.terminate()
   820  //
   821  //	// Create a long enough forked chain
   822  //	common, fork := MaxHashFetch, 4*MaxHashFetch
   823  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   824  //
   825  //	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   826  //	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   827  //
   828  //	// Synchronise with the peer and make sure all blocks were retrieved
   829  //	if err := tester.sync("light", nil, mode); err != nil {
   830  //		t.Fatalf("failed to synchronise blocks: %v", err)
   831  //	}
   832  //	assertOwnChain(t, tester, common+fork+1)
   833  //
   834  //	// Synchronise with the second peer and make sure that fork is pulled too
   835  //	if err := tester.sync("heavy", nil, mode); err != nil {
   836  //		t.Fatalf("failed to synchronise blocks: %v", err)
   837  //	}
   838  //	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   839  //}
   840  //
   841  //// Tests that chain forks are contained within a certain interval of the current
   842  //// chain head, ensuring that malicious peers cannot waste resources by feeding
   843  //// long dead chains.
   844  //func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   845  //func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   846  //func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   847  //func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   848  //func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   849  //func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   850  //
   851  //func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   852  //	t.Parallel()
   853  //
   854  //	tester := newTester()
   855  //	defer tester.terminate()
   856  //
   857  //	// Create a long enough forked chain
   858  //	common, fork := 13, int(MaxForkAncestry+17)
   859  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   860  //
   861  //	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   862  //	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   863  //
   864  //	// Synchronise with the peer and make sure all blocks were retrieved
   865  //	if err := tester.sync("original", nil, mode); err != nil {
   866  //		t.Fatalf("failed to synchronise blocks: %v", err)
   867  //	}
   868  //	assertOwnChain(t, tester, common+fork+1)
   869  //
   870  //	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   871  //	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   872  //		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   873  //	}
   874  //}
   875  //
   876  //// Tests that chain forks are contained within a certain interval of the current
   877  //// chain head for short but heavy forks too. These are a bit special because they
   878  //// take different ancestor lookup paths.
   879  //func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   880  //func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   881  //func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   882  //func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   883  //func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   884  //func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   885  //
   886  //func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   887  //	t.Parallel()
   888  //
   889  //	tester := newTester()
   890  //	defer tester.terminate()
   891  //
   892  //	// Create a long enough forked chain
   893  //	common, fork := 13, int(MaxForkAncestry+17)
   894  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   895  //
   896  //	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   897  //	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   898  //
   899  //	// Synchronise with the peer and make sure all blocks were retrieved
   900  //	if err := tester.sync("original", nil, mode); err != nil {
   901  //		t.Fatalf("failed to synchronise blocks: %v", err)
   902  //	}
   903  //	assertOwnChain(t, tester, common+fork+1)
   904  //
   905  //	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   906  //	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   907  //		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   908  //	}
   909  //}
   910  //
   911  //// Tests that an inactive downloader will not accept incoming block headers and
   912  //// bodies.
   913  //func TestInactiveDownloader62(t *testing.T) {
   914  //	t.Parallel()
   915  //
   916  //	tester := newTester()
   917  //	defer tester.terminate()
   918  //
   919  //	// Check that neither block headers nor bodies are accepted
   920  //	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   921  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   922  //	}
   923  //	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   924  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   925  //	}
   926  //}
   927  //
   928  //// Tests that an inactive downloader will not accept incoming block headers,
   929  //// bodies and receipts.
   930  //func TestInactiveDownloader63(t *testing.T) {
   931  //	t.Parallel()
   932  //
   933  //	tester := newTester()
   934  //	defer tester.terminate()
   935  //
   936  //	// Check that neither block headers nor bodies are accepted
   937  //	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   938  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   939  //	}
   940  //	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   941  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   942  //	}
   943  //	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   944  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   945  //	}
   946  //}
   947  //
   948  //// Tests that a canceled download wipes all previously accumulated state.
   949  //func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   950  //func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   951  //func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   952  //func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   953  //func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   954  //func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   955  //
   956  //func testCancel(t *testing.T, protocol int, mode SyncMode) {
   957  //	t.Parallel()
   958  //
   959  //	tester := newTester()
   960  //	defer tester.terminate()
   961  //
   962  //	// Create a small enough block chain to download and the tester
   963  //	targetBlocks := blockCacheItems - 15
   964  //	if targetBlocks >= MaxHashFetch {
   965  //		targetBlocks = MaxHashFetch - 15
   966  //	}
   967  //	if targetBlocks >= MaxHeaderFetch {
   968  //		targetBlocks = MaxHeaderFetch - 15
   969  //	}
   970  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   971  //
   972  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   973  //
   974  //	// Make sure canceling works with a pristine downloader
   975  //	tester.downloader.Cancel()
   976  //	if !tester.downloader.queue.Idle() {
   977  //		t.Errorf("download queue not idle")
   978  //	}
   979  //	// Synchronise with the peer, but cancel afterwards
   980  //	if err := tester.sync("peer", nil, mode); err != nil {
   981  //		t.Fatalf("failed to synchronise blocks: %v", err)
   982  //	}
   983  //	tester.downloader.Cancel()
   984  //	if !tester.downloader.queue.Idle() {
   985  //		t.Errorf("download queue not idle")
   986  //	}
   987  //}
   988  //
   989  //// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   990  //func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   991  //func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   992  //func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   993  //func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   994  //func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   995  //func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   996  //
   997  //func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   998  //	t.Parallel()
   999  //
  1000  //	tester := newTester()
  1001  //	defer tester.terminate()
  1002  //
  1003  //	// Create various peers with various parts of the chain
  1004  //	targetPeers := 8
  1005  //	targetBlocks := targetPeers*blockCacheItems - 15
  1006  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1007  //
  1008  //	for i := 0; i < targetPeers; i++ {
  1009  //		id := fmt.Sprintf("peer #%d", i)
  1010  //		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
  1011  //	}
  1012  //	if err := tester.sync("peer #0", nil, mode); err != nil {
  1013  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1014  //	}
  1015  //	assertOwnChain(t, tester, targetBlocks+1)
  1016  //}
  1017  //
  1018  //// Tests that synchronisations behave well in multi-version protocol environments
  1019  //// and do not wreak havoc on other nodes in the network.
  1020  //func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1021  //func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1022  //func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1023  //func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1024  //func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1025  //func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1026  //
  1027  //func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1028  //	t.Parallel()
  1029  //
  1030  //	tester := newTester()
  1031  //	defer tester.terminate()
  1032  //
  1033  //	// Create a small enough block chain to download
  1034  //	targetBlocks := blockCacheItems - 15
  1035  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1036  //
  1037  //	// Create peers of every type
  1038  //	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1039  //	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1040  //	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1041  //
  1042  //	// Synchronise with the requested peer and make sure all blocks were retrieved
  1043  //	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1044  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1045  //	}
  1046  //	assertOwnChain(t, tester, targetBlocks+1)
  1047  //
  1048  //	// Check that no peers have been dropped off
  1049  //	for _, version := range []int{62, 63, 64} {
  1050  //		peer := fmt.Sprintf("peer %d", version)
  1051  //		if _, ok := tester.peerHashes[peer]; !ok {
  1052  //			t.Errorf("%s dropped", peer)
  1053  //		}
  1054  //	}
  1055  //}
  1056  //
  1057  //// Tests that if a block is empty (e.g. header only), no body request should be
  1058  //// made, and instead the header should be assembled into a whole block in itself.
  1059  //func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1060  //func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1061  //func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1062  //func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1063  //func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1064  //func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1065  //
  1066  //func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1067  //	t.Parallel()
  1068  //
  1069  //	tester := newTester()
  1070  //	defer tester.terminate()
  1071  //
  1072  //	// Create a block chain to download
  1073  //	targetBlocks := 2*blockCacheItems - 15
  1074  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1075  //
  1076  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1077  //
  1078  //	// Instrument the downloader to signal body requests
  1079  //	bodiesHave, receiptsHave := int32(0), int32(0)
  1080  //	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1081  //		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1082  //	}
  1083  //	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1084  //		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1085  //	}
  1086  //	// Synchronise with the peer and make sure all blocks were retrieved
  1087  //	if err := tester.sync("peer", nil, mode); err != nil {
  1088  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1089  //	}
  1090  //	assertOwnChain(t, tester, targetBlocks+1)
  1091  //
  1092  //	// Validate the number of block bodies that should have been requested
  1093  //	bodiesNeeded, receiptsNeeded := 0, 0
  1094  //	for _, block := range blocks {
  1095  //		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1096  //			bodiesNeeded++
  1097  //		}
  1098  //	}
  1099  //	for _, receipt := range receipts {
  1100  //		if mode == FastSync && len(receipt) > 0 {
  1101  //			receiptsNeeded++
  1102  //		}
  1103  //	}
  1104  //	if int(bodiesHave) != bodiesNeeded {
  1105  //		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1106  //	}
  1107  //	if int(receiptsHave) != receiptsNeeded {
  1108  //		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1109  //	}
  1110  //}
  1111  //
  1112  //// Tests that headers are enqueued continuously, preventing malicious nodes from
  1113  //// stalling the downloader by feeding gapped header chains.
  1114  //func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1115  //func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1116  //func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1117  //func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1118  //func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1119  //func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1120  //
  1121  //func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1122  //	t.Parallel()
  1123  //
  1124  //	tester := newTester()
  1125  //	defer tester.terminate()
  1126  //
  1127  //	// Create a small enough block chain to download
  1128  //	targetBlocks := blockCacheItems - 15
  1129  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1130  //
  1131  //	// Attempt a full sync with an attacker feeding gapped headers
  1132  //	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1133  //	missing := targetBlocks / 2
  1134  //	delete(tester.peerHeaders["attack"], hashes[missing])
  1135  //
  1136  //	if err := tester.sync("attack", nil, mode); err == nil {
  1137  //		t.Fatalf("succeeded attacker synchronisation")
  1138  //	}
  1139  //	// Synchronise with the valid peer and make sure sync succeeds
  1140  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1141  //	if err := tester.sync("valid", nil, mode); err != nil {
  1142  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1143  //	}
  1144  //	assertOwnChain(t, tester, targetBlocks+1)
  1145  //}
  1146  //
  1147  //// Tests that if requested headers are shifted (i.e. first is missing), the queue
  1148  //// detects the invalid numbering.
  1149  //func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1150  //func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1151  //func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1152  //func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1153  //func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1154  //func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1155  //
  1156  //func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1157  //	t.Parallel()
  1158  //
  1159  //	tester := newTester()
  1160  //	defer tester.terminate()
  1161  //
  1162  //	// Create a small enough block chain to download
  1163  //	targetBlocks := blockCacheItems - 15
  1164  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1165  //
  1166  //	// Attempt a full sync with an attacker feeding shifted headers
  1167  //	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1168  //	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1169  //	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1170  //	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1171  //
  1172  //	if err := tester.sync("attack", nil, mode); err == nil {
  1173  //		t.Fatalf("succeeded attacker synchronisation")
  1174  //	}
  1175  //	// Synchronise with the valid peer and make sure sync succeeds
  1176  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1177  //	if err := tester.sync("valid", nil, mode); err != nil {
  1178  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1179  //	}
  1180  //	assertOwnChain(t, tester, targetBlocks+1)
  1181  //}
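        //
        //// The shift works because the queue schedules headers against the numbers
        //// it requested: the first header of a delivered batch must carry exactly
        //// the origin number asked for. Roughly, under the simplifying assumption
        //// that expectedFrom holds the requested origin (a sketch, not the queue's
        //// literal code):
        ////
        ////	if len(headers) > 0 && headers[0].Number.Uint64() != expectedFrom {
        ////		return errInvalidChain // batch is shifted relative to the request
        ////	}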
  1182  //
  1183  //// Tests that upon detecting an invalid header, the recent ones are rolled back
  1184  //// for various failure scenarios. Afterwards a full sync is attempted to make
  1185  //// sure no state was corrupted.
  1186  //func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1187  //func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1188  //func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1189  //
  1190  //func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1191  //	t.Parallel()
  1192  //
  1193  //	tester := newTester()
  1194  //	defer tester.terminate()
  1195  //
  1196  //	// Create a small enough block chain to download
  1197  //	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1198  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1199  //
  1200  //	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1201  //	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1202  //	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1203  //	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1204  //	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1205  //
  1206  //	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1207  //		t.Fatalf("succeeded fast attacker synchronisation")
  1208  //	}
  1209  //	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1210  //		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1211  //	}
  1212  //	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1213  //	// This should result in the last fsHeaderSafetyNet headers being rolled
  1214  //	// back, and the pivot point being reverted to a non-block status.
  1215  //	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1216  //	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1217  //	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1218  //	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1219  //
  1220  //	if err := tester.sync("block-attack", nil, mode); err == nil {
  1221  //		t.Fatalf("succeeded block attacker synchronisation")
  1222  //	}
  1223  //	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1224  //		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1225  //	}
  1226  //	if mode == FastSync {
  1227  //		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1228  //			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1229  //		}
  1230  //	}
  1231  //	// Attempt to sync with an attacker that withholds promised blocks after the
  1232  //	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1233  //	// but already imported pivot block.
  1234  //	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1235  //	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1236  //
  1237  //	tester.downloader.syncInitHook = func(uint64, uint64) {
  1238  //		for i := missing; i <= len(hashes); i++ {
  1239  //			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1240  //		}
  1241  //		tester.downloader.syncInitHook = nil
  1242  //	}
  1243  //
  1244  //	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1245  //		t.Fatalf("succeeded withholding attacker synchronisation")
  1246  //	}
  1247  //	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1248  //		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1249  //	}
  1250  //	if mode == FastSync {
  1251  //		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1252  //			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1253  //		}
  1254  //	}
  1255  //	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1256  //	// rollback should also disable fast syncing for this process, verify that we
  1257  //	// did a fresh full sync. Note, we can't assert anything about the receipts
  1258  //	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1259  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1260  //	if err := tester.sync("valid", nil, mode); err != nil {
  1261  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1262  //	}
  1263  //	if hs := len(tester.ownHeaders); hs != len(headers) {
  1264  //		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1265  //	}
  1266  //	if mode != LightSync {
  1267  //		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1268  //			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1269  //		}
  1270  //	}
  1271  //}
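        //
        //// Back-of-the-envelope for the bounds asserted above: hashes run tip-first
        //// (the last entry is genesis), so deleting hashes[len(hashes)-missing]
        //// removes the header of block number missing-1. Everything below the gap
        //// can still be accepted, and on failure the last fsHeaderSafetyNet of the
        //// accepted headers are rolled back. For the fast attacker, roughly:
        ////
        ////	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
        ////	accepted := missing - 2               // highest header importable before the gap
        ////	bound := accepted - fsHeaderSafetyNet // just under MaxHeaderFetch, the cap above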
  1272  //
  1273  //// Tests that a peer advertising a high TD doesn't get to stall the downloader
  1274  //// afterwards by not sending any useful hashes.
  1275  //func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1276  //func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1277  //func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1278  //func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1279  //func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1280  //func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1281  //
  1282  //func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1283  //	t.Parallel()
  1284  //
  1285  //	tester := newTester()
  1286  //	defer tester.terminate()
  1287  //
  1288  //	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1289  //	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1290  //
  1291  //	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1292  //		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1293  //	}
  1294  //}
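        //
        //// The setup is deliberately lopsided: the attacker owns nothing beyond
        //// genesis, yet the sync is started against an advertised total difficulty
        //// of 1000000. The downloader must conclude the peer is stalling rather
        //// than wait forever, e.g. (mirroring the call above):
        ////
        ////	// advertise a huge TD while serving no useful headers
        ////	err := tester.sync("attack", big.NewInt(1000000), mode)
        ////	// err must be errStallingPeer, not a hang or a generic timeout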
  1295  //
  1296  //// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1297  //func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1298  //func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1299  //func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1300  //
  1301  //func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1302  //	t.Parallel()
  1303  //
  1304  //	// Define the disconnection requirement for individual hash fetch errors
  1305  //	tests := []struct {
  1306  //		result error
  1307  //		drop   bool
  1308  //	}{
  1309  //		{nil, false},                        // Sync succeeded, all is well
  1310  //		{errBusy, false},                    // Sync is already in progress, no problem
  1311  //		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1312  //		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1313  //		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1314  //		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1315  //		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1316  //		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1317  //		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1318  //		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1319  //		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1320  //		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1321  //		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1322  //		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1323  //		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1324  //		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1325  //		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1326  //		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1327  //		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1328  //		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1329  //	}
  1330  //	// Run the tests and check disconnection status
  1331  //	tester := newTester()
  1332  //	defer tester.terminate()
  1333  //
  1334  //	for i, tt := range tests {
  1335  //		// Register a new peer and ensure its presence
  1336  //		id := fmt.Sprintf("test %d", i)
  1337  //		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1338  //			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1339  //		}
  1340  //		if _, ok := tester.peerHashes[id]; !ok {
  1341  //			t.Fatalf("test %d: registered peer not found", i)
  1342  //		}
  1343  //		// Simulate a synchronisation and check the required result
  1344  //		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1345  //
  1346  //		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1347  //		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1348  //			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1349  //		}
  1350  //	}
  1351  //}
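        //
        //// The expected drops in the table mirror how Synchronise reacts to the
        //// error a sync attempt returns: faults that implicate the sync origin cost
        //// it the connection, everything else is forgiven. Schematically (a sketch
        //// of the dispatch, not the implementation verbatim):
        ////
        ////	switch err {
        ////	case errBadPeer, errStallingPeer, errTimeout, errEmptyHeaderSet,
        ////		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
        ////		d.dropPeer(id) // the origin peer itself misbehaved
        ////	default:
        ////		// cancellations and third-party faults leave the peer registered
        ////	}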
  1352  //
  1353  //// Tests that synchronisation progress (origin block number, current block number
  1354  //// and highest block number) is tracked and updated correctly.
  1355  //func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1356  //func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1357  //func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1358  //func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1359  //func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1360  //func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1361  //
  1362  //func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1363  //	t.Parallel()
  1364  //
  1365  //	tester := newTester()
  1366  //	defer tester.terminate()
  1367  //
  1368  //	// Create a small enough block chain to download
  1369  //	targetBlocks := blockCacheItems - 15
  1370  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1371  //
  1372  //	// Set a sync init hook to catch progress changes
  1373  //	starting := make(chan struct{})
  1374  //	progress := make(chan struct{})
  1375  //
  1376  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1377  //		starting <- struct{}{}
  1378  //		<-progress
  1379  //	}
  1380  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1381  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1382  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1383  //	}
  1384  //	// Synchronise half the blocks and check initial progress
  1385  //	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1386  //	pending := new(sync.WaitGroup)
  1387  //	pending.Add(1)
  1388  //
  1389  //	go func() {
  1390  //		defer pending.Done()
  1391  //		if err := tester.sync("peer-half", nil, mode); err != nil {
  1392  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1393  //		}
  1394  //	}()
  1395  //	<-starting
  1396  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1397  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1398  //	}
  1399  //	progress <- struct{}{}
  1400  //	pending.Wait()
  1401  //
  1402  //	// Synchronise all the blocks and check continuation progress
  1403  //	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1404  //	pending.Add(1)
  1405  //
  1406  //	go func() {
  1407  //		defer pending.Done()
  1408  //		if err := tester.sync("peer-full", nil, mode); err != nil {
  1409  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1410  //		}
  1411  //	}()
  1412  //	<-starting
  1413  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1414  //		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1415  //	}
  1416  //	progress <- struct{}{}
  1417  //	pending.Wait()
  1418  //
  1419  //	// Check final progress after successful sync
  1420  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1421  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1422  //	}
  1423  //}
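        //
        //// The three counters verified above are what callers use to render sync
        //// status. One plausible way to turn them into a completion percentage
        //// (illustrative only; assumes CurrentBlock never trails StartingBlock):
        ////
        ////	p := tester.downloader.Progress()
        ////	if p.HighestBlock > p.StartingBlock {
        ////		done := float64(p.CurrentBlock-p.StartingBlock) /
        ////			float64(p.HighestBlock-p.StartingBlock)
        ////		fmt.Printf("sync %.1f%% complete\n", 100*done)
        ////	}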
  1424  //
  1425  //// Tests that synchronisation progress (origin block number and highest block
  1426  //// number) is tracked and updated correctly in case of a fork (or manual head
  1427  //// reversal).
  1428  //func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1429  //func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1430  //func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1431  //func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1432  //func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1433  //func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1434  //
  1435  //func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1436  //	t.Parallel()
  1437  //
  1438  //	tester := newTester()
  1439  //	defer tester.terminate()
  1440  //
  1441  //	// Create a forked chain to simulate origin reversal
  1442  //	common, fork := MaxHashFetch, 2*MaxHashFetch
  1443  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1444  //
  1445  //	// Set a sync init hook to catch progress changes
  1446  //	starting := make(chan struct{})
  1447  //	progress := make(chan struct{})
  1448  //
  1449  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1450  //		starting <- struct{}{}
  1451  //		<-progress
  1452  //	}
  1453  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1454  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1455  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1456  //	}
  1457  //	// Synchronise with one of the forks and check progress
  1458  //	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1459  //	pending := new(sync.WaitGroup)
  1460  //	pending.Add(1)
  1461  //
  1462  //	go func() {
  1463  //		defer pending.Done()
  1464  //		if err := tester.sync("fork A", nil, mode); err != nil {
  1465  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1466  //		}
  1467  //	}()
  1468  //	<-starting
  1469  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1470  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1471  //	}
  1472  //	progress <- struct{}{}
  1473  //	pending.Wait()
  1474  //
  1475  //	// Simulate a successful sync above the fork
  1476  //	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1477  //
  1478  //	// Synchronise with the second fork and check progress resets
  1479  //	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1480  //	pending.Add(1)
  1481  //
  1482  //	go func() {
  1483  //		defer pending.Done()
  1484  //		if err := tester.sync("fork B", nil, mode); err != nil {
  1485  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1486  //		}
  1487  //	}()
  1488  //	<-starting
  1489  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1490  //		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1491  //	}
  1492  //	progress <- struct{}{}
  1493  //	pending.Wait()
  1494  //
  1495  //	// Check final progress after successful sync
  1496  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1497  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1498  //	}
  1499  //}
  1500  //
  1501  //// Tests that if synchronisation is aborted due to some failure, then the progress
  1502  //// origin is not updated in the next sync cycle, as it should be considered the
  1503  //// continuation of the previous sync and not a new instance.
  1504  //func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1505  //func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1506  //func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1507  //func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1508  //func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1509  //func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1510  //
  1511  //func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1512  //	t.Parallel()
  1513  //
  1514  //	tester := newTester()
  1515  //	defer tester.terminate()
  1516  //
  1517  //	// Create a small enough block chain to download
  1518  //	targetBlocks := blockCacheItems - 15
  1519  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1520  //
  1521  //	// Set a sync init hook to catch progress changes
  1522  //	starting := make(chan struct{})
  1523  //	progress := make(chan struct{})
  1524  //
  1525  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1526  //		starting <- struct{}{}
  1527  //		<-progress
  1528  //	}
  1529  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1530  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1531  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1532  //	}
  1533  //	// Attempt a full sync with a faulty peer
  1534  //	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1535  //	missing := targetBlocks / 2
  1536  //	delete(tester.peerHeaders["faulty"], hashes[missing])
  1537  //	delete(tester.peerBlocks["faulty"], hashes[missing])
  1538  //	delete(tester.peerReceipts["faulty"], hashes[missing])
  1539  //
  1540  //	pending := new(sync.WaitGroup)
  1541  //	pending.Add(1)
  1542  //
  1543  //	go func() {
  1544  //		defer pending.Done()
  1545  //		if err := tester.sync("faulty", nil, mode); err == nil {
  1546  //			panic("succeeded faulty synchronisation")
  1547  //		}
  1548  //	}()
  1549  //	<-starting
  1550  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1551  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1552  //	}
  1553  //	progress <- struct{}{}
  1554  //	pending.Wait()
  1555  //
  1556  //	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1557  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1558  //	pending.Add(1)
  1559  //
  1560  //	go func() {
  1561  //		defer pending.Done()
  1562  //		if err := tester.sync("valid", nil, mode); err != nil {
  1563  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1564  //		}
  1565  //	}()
  1566  //	<-starting
  1567  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1568  //		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1569  //	}
  1570  //	progress <- struct{}{}
  1571  //	pending.Wait()
  1572  //
  1573  //	// Check final progress after successful sync
  1574  //	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1575  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1576  //	}
  1577  //}
  1578  //
  1579  //// Tests that if an attacker fakes a chain height, after the attack is detected,
  1580  //// the progress height is successfully reduced at the next sync invocation.
  1581  //func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1582  //func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1583  //func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1584  //func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1585  //func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1586  //func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1587  //
  1588  //func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1589  //	t.Parallel()
  1590  //
  1591  //	tester := newTester()
  1592  //	defer tester.terminate()
  1593  //
  1594  //	// Create a small block chain
  1595  //	targetBlocks := blockCacheItems - 15
  1596  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1597  //
  1598  //	// Set a sync init hook to catch progress changes
  1599  //	starting := make(chan struct{})
  1600  //	progress := make(chan struct{})
  1601  //
  1602  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1603  //		starting <- struct{}{}
  1604  //		<-progress
  1605  //	}
  1606  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1607  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1608  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1609  //	}
  1610  //	// Create and sync with an attacker that promises a higher chain than available
  1611  //	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1612  //	for i := 1; i < 3; i++ {
  1613  //		delete(tester.peerHeaders["attack"], hashes[i])
  1614  //		delete(tester.peerBlocks["attack"], hashes[i])
  1615  //		delete(tester.peerReceipts["attack"], hashes[i])
  1616  //	}
  1617  //
  1618  //	pending := new(sync.WaitGroup)
  1619  //	pending.Add(1)
  1620  //
  1621  //	go func() {
  1622  //		defer pending.Done()
  1623  //		if err := tester.sync("attack", nil, mode); err == nil {
  1624  //			panic("succeeded attacker synchronisation")
  1625  //		}
  1626  //	}()
  1627  //	<-starting
  1628  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1629  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1630  //	}
  1631  //	progress <- struct{}{}
  1632  //	pending.Wait()
  1633  //
  1634  //	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1635  //	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1636  //	pending.Add(1)
  1637  //
  1638  //	go func() {
  1639  //		defer pending.Done()
  1640  //		if err := tester.sync("valid", nil, mode); err != nil {
  1641  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1642  //		}
  1643  //	}()
  1644  //	<-starting
  1645  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1646  //		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1647  //	}
  1648  //	progress <- struct{}{}
  1649  //	pending.Wait()
  1650  //
  1651  //	// Check final progress after successful sync
  1652  //	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1653  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1654  //	}
  1655  //}
  1656  //
  1657  //// This test reproduces an issue where unexpected deliveries would
  1658  //// block indefinitely if they arrived at the right time.
  1659  //// We use data-driven subtests so that the whole group runs in parallel on
  1660  //// its own, separate from the other tests, avoiding intermittent failures.
  1661  //func TestDeliverHeadersHang(t *testing.T) {
  1662  //	testCases := []struct {
  1663  //		protocol int
  1664  //		syncMode SyncMode
  1665  //	}{
  1666  //		{62, FullSync},
  1667  //		{63, FullSync},
  1668  //		{63, FastSync},
  1669  //		{64, FullSync},
  1670  //		{64, FastSync},
  1671  //		{64, LightSync},
  1672  //	}
  1673  //	for _, tc := range testCases {
  1674  //		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1675  //			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1676  //		})
  1677  //	}
  1678  //}
  1679  //
  1680  //type floodingTestPeer struct {
  1681  //	peer   Peer
  1682  //	tester *downloadTester
  1683  //	pend   sync.WaitGroup
  1684  //}
  1685  //
  1686  //func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1687  //func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1688  //	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1689  //}
  1690  //func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1691  //	return ftp.peer.RequestBodies(hashes)
  1692  //}
  1693  //func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1694  //	return ftp.peer.RequestReceipts(hashes)
  1695  //}
  1696  //func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1697  //	return ftp.peer.RequestNodeData(hashes)
  1698  //}
  1699  //
  1700  //func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1701  //	deliveriesDone := make(chan struct{}, 500)
  1702  //	for i := 0; i < cap(deliveriesDone); i++ {
  1703  //		peer := fmt.Sprintf("fake-peer%d", i)
  1704  //		ftp.pend.Add(1)
  1705  //
  1706  //		go func() {
  1707  //			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1708  //			deliveriesDone <- struct{}{}
  1709  //			ftp.pend.Done()
  1710  //		}()
  1711  //	}
  1712  //	// Deliver the actual requested headers.
  1713  //	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1714  //	// None of the extra deliveries should block.
  1715  //	timeout := time.After(60 * time.Second)
  1716  //	for i := 0; i < cap(deliveriesDone); i++ {
  1717  //		select {
  1718  //		case <-deliveriesDone:
  1719  //		case <-timeout:
  1720  //			panic("blocked")
  1721  //		}
  1722  //	}
  1723  //	return nil
  1724  //}
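        //
        //// What keeps this flood from wedging the downloader is that deliveries are
        //// posted with a cancellable select rather than a bare channel send, so an
        //// unexpected packet is abandoned the moment the sync round ends.
        //// Schematically (a sketch of the pattern, not the exact delivery code):
        ////
        ////	select {
        ////	case d.headerCh <- packet: // a running sync drains the delivery
        ////	case <-cancel: // sync finished or aborted: drop it instead of blocking
        ////		return errNoSyncActive
        ////	}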
  1725  //
  1726  //func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1727  //	t.Parallel()
  1728  //
  1729  //	master := newTester()
  1730  //	defer master.terminate()
  1731  //
  1732  //	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1733  //	for i := 0; i < 200; i++ {
  1734  //		tester := newTester()
  1735  //		tester.peerDb = master.peerDb
  1736  //
  1737  //		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1738  //		// Whenever the downloader requests headers, flood it with
  1739  //		// a lot of unrequested header deliveries.
  1740  //		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1741  //			peer:   tester.downloader.peers.peers["peer"].peer,
  1742  //			tester: tester,
  1743  //		}
  1744  //		if err := tester.sync("peer", nil, mode); err != nil {
  1745  //			t.Errorf("test %d: sync failed: %v", i, err)
  1746  //		}
  1747  //		tester.terminate()
  1748  //
  1749  //		// Flush all goroutines to prevent messing with subsequent tests
  1750  //		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1751  //	}
  1752  //}