github.com/Gessiux/neatchain@v1.3.1/neatptc/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	//"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
     28  	"github.com/Gessiux/neatchain/chain/core"
     29  	"github.com/Gessiux/neatchain/chain/core/rawdb"
     30  	"github.com/Gessiux/neatchain/chain/core/types"
     31  	"github.com/Gessiux/neatchain/chain/trie"
     32  	"github.com/Gessiux/neatchain/neatdb"
     33  	"github.com/Gessiux/neatchain/params"
     34  	"github.com/Gessiux/neatchain/utilities/common"
     35  	"github.com/Gessiux/neatchain/utilities/crypto"
     36  	"github.com/Gessiux/neatchain/utilities/event"
    37  )
    38  
    39  var (
    40  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    41  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    42  )
    43  
    44  // Reduce some of the parameters to make the tester faster.
    45  func init() {
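         	// MaxForkAncestry bounds how deep the common-ancestor search may go,
         	// blockCacheItems caps the download queue's result cache, and
         	// fsHeaderContCheck is the poll interval for header continuation checks.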
    46  	MaxForkAncestry = uint64(10000)
    47  	blockCacheItems = 1024
    48  	fsHeaderContCheck = 500 * time.Millisecond
    49  }
    50  
     51  // downloadTester is a test simulator for mocking out a local block chain.
    52  type downloadTester struct {
    53  	downloader *Downloader
    54  
     55  	genesis *types.Block   // Genesis block used by the tester and peers
    56  	stateDb neatdb.Database // Database used by the tester for syncing from peers
    57  	peerDb  neatdb.Database // Database of the peers containing all data
    58  
    59  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    60  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    61  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    62  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    63  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    64  
    65  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    66  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    67  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    68  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    69  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    70  
    71  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    72  
    73  	lock sync.RWMutex
    74  }
    75  
    76  // newTester creates a new downloader test mocker.
    77  func newTester() *downloadTester {
    78  	testdb := rawdb.NewMemoryDatabase()
    79  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    80  
    81  	tester := &downloadTester{
    82  		genesis:           genesis,
    83  		peerDb:            testdb,
    84  		ownHashes:         []common.Hash{genesis.Hash()},
    85  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    86  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    87  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    88  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    89  		peerHashes:        make(map[string][]common.Hash),
    90  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    91  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    92  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    93  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    94  		peerMissingStates: make(map[string]map[common.Hash]bool),
    95  	}
    96  	tester.stateDb = rawdb.NewMemoryDatabase()
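         	// Mark the genesis state root as present in the tester's own state
         	// database, so insertions that check for parent state can succeed.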
    97  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    98  
    99  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer, nil)
   100  
   101  	return tester
   102  }
   103  
   104  // makeChain creates a chain of n blocks starting at and including parent.
    105  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   106  // contains a transaction and every 5th an uncle to allow testing correct block
   107  // reassembly.
   108  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   109  	// Generate the block chain
   110  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, nil, dl.peerDb, n, func(i int, block *core.BlockGen) {
   111  		block.SetCoinbase(common.Address{seed})
   112  
   113  		// If a heavy chain is requested, delay blocks to raise difficulty
   114  		if heavy {
   115  			block.OffsetTime(-1)
   116  		}
    117  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   118  		if parent == dl.genesis && i%3 == 0 {
   119  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   120  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   121  			if err != nil {
   122  				panic(err)
   123  			}
   124  			block.AddTx(tx)
   125  		}
   126  		// If the block number is a multiple of 5, add a bonus uncle to the block
   127  		if i > 0 && i%5 == 0 {
   128  			block.AddUncle(&types.Header{
   129  				ParentHash: block.PrevBlock(i - 1).Hash(),
   130  				Number:     big.NewInt(block.Number().Int64() - 1),
   131  			})
   132  		}
   133  	})
   134  	// Convert the block-chain into a hash-chain and header/block maps
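         	// The hash chain is ordered head -> parent: index 0 holds the newest
         	// block, the final element the supplied parent itself.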
   135  	hashes := make([]common.Hash, n+1)
   136  	hashes[len(hashes)-1] = parent.Hash()
   137  
   138  	headerm := make(map[common.Hash]*types.Header, n+1)
   139  	headerm[parent.Hash()] = parent.Header()
   140  
   141  	blockm := make(map[common.Hash]*types.Block, n+1)
   142  	blockm[parent.Hash()] = parent
   143  
   144  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   145  	receiptm[parent.Hash()] = parentReceipts
   146  
   147  	for i, b := range blocks {
   148  		hashes[len(hashes)-i-2] = b.Hash()
   149  		headerm[b.Hash()] = b.Header()
   150  		blockm[b.Hash()] = b
   151  		receiptm[b.Hash()] = receipts[i]
   152  	}
   153  	return hashes, headerm, blockm, receiptm
   154  }
   155  
   156  // makeChainFork creates two chains of length n, such that h1[:f] and
   157  // h2[:f] are different but have a common suffix of length n-f.
   158  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   159  	// Create the common suffix
   160  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
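         	// The suffix is seeded with 0 and the forks below with 1 and 2, so the
         	// forked blocks differ in coinbase and therefore in hash.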
   161  
    162  	// Create the forks, making the second heavier if unbalanced forks were requested
   163  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   164  	hashes1 = append(hashes1, hashes[1:]...)
   165  
   166  	heavy := false
   167  	if !balanced {
   168  		heavy = true
   169  	}
   170  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   171  	hashes2 = append(hashes2, hashes[1:]...)
   172  
   173  	for hash, header := range headers {
   174  		headers1[hash] = header
   175  		headers2[hash] = header
   176  	}
   177  	for hash, block := range blocks {
   178  		blocks1[hash] = block
   179  		blocks2[hash] = block
   180  	}
   181  	for hash, receipt := range receipts {
   182  		receipts1[hash] = receipt
   183  		receipts2[hash] = receipt
   184  	}
   185  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   186  }
   187  
   188  // terminate aborts any operations on the embedded downloader and releases all
   189  // held resources.
   190  func (dl *downloadTester) terminate() {
   191  	dl.downloader.Terminate()
   192  }
   193  
   194  // sync starts synchronizing with a remote peer, blocking until it completes.
   195  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   196  	dl.lock.RLock()
   197  	hash := dl.peerHashes[id][0]
   198  	// If no particular TD was requested, load from the peer's blockchain
   199  	if td == nil {
   200  		td = big.NewInt(1)
   201  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   202  			td = diff
   203  		}
   204  	}
   205  	dl.lock.RUnlock()
   206  
   207  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   208  	err := dl.downloader.synchronise(id, hash, td, mode)
   209  	select {
   210  	case <-dl.downloader.cancelCh:
   211  		// Ok, downloader fully cancelled after sync cycle
   212  	default:
   213  		// Downloader is still accepting packets, can block a peer up
   214  		panic("downloader active post sync cycle") // panic will be caught by tester
   215  	}
   216  	return err
   217  }
   218  
    219  // HasHeader checks if a header is present in the tester's canonical chain.
   220  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   221  	return dl.GetHeaderByHash(hash) != nil
   222  }
   223  
    224  // HasBlock checks if a block is present in the tester's canonical chain.
   225  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   226  	return dl.GetBlockByHash(hash) != nil
   227  }
   228  
    229  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   230  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   231  	dl.lock.RLock()
   232  	defer dl.lock.RUnlock()
   233  
   234  	return dl.ownHeaders[hash]
   235  }
   236  
    237  // GetBlockByHash retrieves a block from the tester's canonical chain.
   238  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   239  	dl.lock.RLock()
   240  	defer dl.lock.RUnlock()
   241  
   242  	return dl.ownBlocks[hash]
   243  }
   244  
   245  // CurrentHeader retrieves the current head header from the canonical chain.
   246  func (dl *downloadTester) CurrentHeader() *types.Header {
   247  	dl.lock.RLock()
   248  	defer dl.lock.RUnlock()
   249  
   250  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   251  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   252  			return header
   253  		}
   254  	}
   255  	return dl.genesis.Header()
   256  }
   257  
   258  // CurrentBlock retrieves the current head block from the canonical chain.
   259  func (dl *downloadTester) CurrentBlock() *types.Block {
   260  	dl.lock.RLock()
   261  	defer dl.lock.RUnlock()
   262  
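         	// Walk the hash chain backwards, returning the newest block whose state
         	// is actually present: fast-synced blocks without state don't qualify.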
   263  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   264  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   265  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   266  				return block
   267  			}
   268  		}
   269  	}
   270  	return dl.genesis
   271  }
   272  
   273  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   274  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   275  	dl.lock.RLock()
   276  	defer dl.lock.RUnlock()
   277  
   278  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   279  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   280  			return block
   281  		}
   282  	}
   283  	return dl.genesis
   284  }
   285  
   286  // FastSyncCommitHead manually sets the head block to a given hash.
   287  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   288  	// For now only check that the state trie is correct
   289  	if block := dl.GetBlockByHash(hash); block != nil {
   290  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
   291  		return err
   292  	}
   293  	return fmt.Errorf("non existent block: %x", hash[:4])
   294  }
   295  
   296  // GetTd retrieves the block's total difficulty from the canonical chain.
   297  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   298  	dl.lock.RLock()
   299  	defer dl.lock.RUnlock()
   300  
   301  	return dl.ownChainTd[hash]
   302  }
   303  
   304  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   305  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   306  	dl.lock.Lock()
   307  	defer dl.lock.Unlock()
   308  
    309  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   310  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   311  		return 0, errors.New("unknown parent")
   312  	}
   313  	for i := 1; i < len(headers); i++ {
   314  		if headers[i].ParentHash != headers[i-1].Hash() {
   315  			return i, errors.New("unknown parent")
   316  		}
   317  	}
   318  	// Do a full insert if pre-checks passed
   319  	for i, header := range headers {
   320  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   321  			continue
   322  		}
   323  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   324  			return i, errors.New("unknown parent")
   325  		}
   326  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   327  		dl.ownHeaders[header.Hash()] = header
   328  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   329  	}
   330  	return len(headers), nil
   331  }
   332  
   333  // InsertChain injects a new batch of blocks into the simulated chain.
   334  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   335  	dl.lock.Lock()
   336  	defer dl.lock.Unlock()
   337  
   338  	for i, block := range blocks {
   339  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   340  			return i, errors.New("unknown parent")
   341  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   342  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   343  		}
   344  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   345  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   346  			dl.ownHeaders[block.Hash()] = block.Header()
   347  		}
   348  		dl.ownBlocks[block.Hash()] = block
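         		// Simulate full block processing by recording the state root as
         		// available in the state database.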
   349  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   350  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   351  	}
   352  	return len(blocks), nil
   353  }
   354  
   355  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   356  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   357  	dl.lock.Lock()
   358  	defer dl.lock.Unlock()
   359  
   360  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   361  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   362  			return i, errors.New("unknown owner")
   363  		}
   364  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   365  			return i, errors.New("unknown parent")
   366  		}
   367  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   368  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   369  	}
   370  	return len(blocks), nil
   371  }
   372  
   373  // Rollback removes some recently added elements from the chain.
   374  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   375  	dl.lock.Lock()
   376  	defer dl.lock.Unlock()
   377  
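         	// Drop the hashes newest-first, trimming the canonical hash chain only
         	// while its tail keeps matching the rolled back entries.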
   378  	for i := len(hashes) - 1; i >= 0; i-- {
   379  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   380  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   381  		}
   382  		delete(dl.ownChainTd, hashes[i])
   383  		delete(dl.ownHeaders, hashes[i])
   384  		delete(dl.ownReceipts, hashes[i])
   385  		delete(dl.ownBlocks, hashes[i])
   386  	}
   387  }
   388  
   389  // newPeer registers a new block download source into the downloader.
   390  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   391  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   392  }
   393  
   394  // newSlowPeer registers a new block download source into the downloader, with a
   395  // specific delay time on processing the network packets sent to it, simulating
   396  // potentially slow network IO.
   397  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   398  	dl.lock.Lock()
   399  	defer dl.lock.Unlock()
   400  
   401  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   402  	if err == nil {
   403  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   404  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   405  		copy(dl.peerHashes[id], hashes)
   406  
   407  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   408  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   409  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   410  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   411  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   412  
   413  		genesis := hashes[len(hashes)-1]
   414  		if header := headers[genesis]; header != nil {
   415  			dl.peerHeaders[id][genesis] = header
   416  			dl.peerChainTds[id][genesis] = header.Difficulty
   417  		}
   418  		if block := blocks[genesis]; block != nil {
   419  			dl.peerBlocks[id][genesis] = block
   420  			dl.peerChainTds[id][genesis] = block.Difficulty()
   421  		}
   422  
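         		// Walk from the genesis towards the head so each parent's total
         		// difficulty is already accumulated before its children are added.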
   423  		for i := len(hashes) - 2; i >= 0; i-- {
   424  			hash := hashes[i]
   425  
   426  			if header, ok := headers[hash]; ok {
   427  				dl.peerHeaders[id][hash] = header
   428  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   429  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   430  				}
   431  			}
   432  			if block, ok := blocks[hash]; ok {
   433  				dl.peerBlocks[id][hash] = block
   434  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   435  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   436  				}
   437  			}
   438  			if receipt, ok := receipts[hash]; ok {
   439  				dl.peerReceipts[id][hash] = receipt
   440  			}
   441  		}
   442  	}
   443  	return err
   444  }
   445  
   446  // dropPeer simulates a hard peer removal from the connection pool.
   447  func (dl *downloadTester) dropPeer(id string) {
   448  	dl.lock.Lock()
   449  	defer dl.lock.Unlock()
   450  
   451  	delete(dl.peerHashes, id)
   452  	delete(dl.peerHeaders, id)
   453  	delete(dl.peerBlocks, id)
   454  	delete(dl.peerChainTds, id)
   455  
   456  	dl.downloader.UnregisterPeer(id)
   457  }
   458  
   459  type downloadTesterPeer struct {
   460  	dl    *downloadTester
   461  	id    string
   462  	delay time.Duration
   463  	lock  sync.RWMutex
   464  }
   465  
    466  // setDelay is a thread-safe setter for the network delay value.
   467  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   468  	dlp.lock.Lock()
   469  	defer dlp.lock.Unlock()
   470  
   471  	dlp.delay = delay
   472  }
   473  
    474  // waitDelay is a thread-safe way to sleep for the configured time.
   475  func (dlp *downloadTesterPeer) waitDelay() {
   476  	dlp.lock.RLock()
   477  	delay := dlp.delay
   478  	dlp.lock.RUnlock()
   479  
   480  	time.Sleep(delay)
   481  }
   482  
    483  // Head retrieves the peer's current head hash and total difficulty.
   485  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   486  	dlp.dl.lock.RLock()
   487  	defer dlp.dl.lock.RUnlock()
   488  
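         	// The advertised total difficulty is left nil here; tester.sync resolves
         	// the TD from peerChainTds before invoking synchronise.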
   489  	return dlp.dl.peerHashes[dlp.id][0], nil
   490  }
   491  
    492  // RequestHeadersByHash retrieves a batch of headers from the test peer,
    493  // starting at the given hash origin. The hash is first mapped to its
    494  // canonical number, then the request is delegated to RequestHeadersByNumber.
   495  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   496  	// Find the canonical number of the hash
   497  	dlp.dl.lock.RLock()
   498  	number := uint64(0)
   499  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   500  		if hash == origin {
   501  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   502  			break
   503  		}
   504  	}
   505  	dlp.dl.lock.RUnlock()
   506  
   507  	// Use the absolute header fetcher to satisfy the query
   508  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   509  }
   510  
    511  // RequestHeadersByNumber retrieves a batch of headers from the test peer,
    512  // starting at the given numbered origin and stepping skip+1 block numbers
    513  // between consecutive headers.
   514  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   515  	dlp.waitDelay()
   516  
   517  	dlp.dl.lock.RLock()
   518  	defer dlp.dl.lock.RUnlock()
   519  
   520  	// Gather the next batch of headers
   521  	hashes := dlp.dl.peerHashes[dlp.id]
   522  	headers := dlp.dl.peerHeaders[dlp.id]
   523  	result := make([]*types.Header, 0, amount)
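         	// hashes is ordered head -> genesis, so block number N lives at index
         	// len(hashes)-N-1; each step advances skip+1 block numbers.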
   524  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   525  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   526  			result = append(result, header)
   527  		}
   528  	}
   529  	// Delay delivery a bit to allow attacks to unfold
   530  	go func() {
   531  		time.Sleep(time.Millisecond)
   532  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   533  	}()
   534  	return nil
   535  }
   536  
    537  // RequestBodies retrieves a batch of block bodies (transactions and uncles)
    538  // from the test peer and delivers them to the downloader asynchronously.
   540  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   541  	dlp.waitDelay()
   542  
   543  	dlp.dl.lock.RLock()
   544  	defer dlp.dl.lock.RUnlock()
   545  
   546  	blocks := dlp.dl.peerBlocks[dlp.id]
   547  
   548  	transactions := make([][]*types.Transaction, 0, len(hashes))
   549  	uncles := make([][]*types.Header, 0, len(hashes))
   550  
   551  	for _, hash := range hashes {
   552  		if block, ok := blocks[hash]; ok {
   553  			transactions = append(transactions, block.Transactions())
   554  			uncles = append(uncles, block.Uncles())
   555  		}
   556  	}
   557  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   558  
   559  	return nil
   560  }
   561  
    562  // RequestReceipts retrieves a batch of block receipts from the test peer and
    563  // delivers them to the downloader asynchronously.
   565  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   566  	dlp.waitDelay()
   567  
   568  	dlp.dl.lock.RLock()
   569  	defer dlp.dl.lock.RUnlock()
   570  
   571  	receipts := dlp.dl.peerReceipts[dlp.id]
   572  
   573  	results := make([][]*types.Receipt, 0, len(hashes))
   574  	for _, hash := range hashes {
   575  		if receipt, ok := receipts[hash]; ok {
   576  			results = append(results, receipt)
   577  		}
   578  	}
   579  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   580  
   581  	return nil
   582  }
   583  
    584  // RequestNodeData retrieves a batch of state trie nodes from the test peer
    585  // and delivers them to the downloader asynchronously.
   587  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   588  	dlp.waitDelay()
   589  
   590  	dlp.dl.lock.RLock()
   591  	defer dlp.dl.lock.RUnlock()
   592  
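         	// Serve only entries not flagged in peerMissingStates, letting tests
         	// withhold specific trie nodes from a fast sync.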
   593  	results := make([][]byte, 0, len(hashes))
   594  	for _, hash := range hashes {
   595  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   596  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   597  				results = append(results, data)
   598  			}
   599  		}
   600  	}
   601  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   602  
   603  	return nil
   604  }
   605  
   606  // assertOwnChain checks if the local chain contains the correct number of items
   607  // of the various chain components.
   608  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   609  	assertOwnForkedChain(t, tester, 1, []int{length})
   610  }
   611  
   612  // assertOwnForkedChain checks if the local forked chain contains the correct
   613  // number of items of the various chain components.
   614  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   615  	// Initialize the counters for the first fork
   616  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
   617  
   618  	if receipts < 0 {
   619  		receipts = 1
   620  	}
   621  	// Update the counters for each subsequent fork
   622  	for _, length := range lengths[1:] {
   623  		headers += length - common
   624  		blocks += length - common
   625  		receipts += length - common - fsMinFullBlocks
   626  	}
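         	// Adjust the expectations per sync mode: full sync stores only the
         	// genesis receipt placeholder, light sync only the headers.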
    627  	switch tester.downloader.mode {
    628  	case FullSync:
    629  		receipts = 1
         	case LightSync:
         		blocks, receipts = 1, 1
         	}
   630  	if hs := len(tester.ownHeaders); hs != headers {
   631  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   632  	}
   633  	if bs := len(tester.ownBlocks); bs != blocks {
   634  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   635  	}
   636  	if rs := len(tester.ownReceipts); rs != receipts {
   637  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   638  	}
   639  	// Verify the state trie too for fast syncs
   640  	/*if tester.downloader.mode == FastSync {
   641  		pivot := uint64(0)
   642  		var index int
   643  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   644  			index = pivot
   645  		} else {
   646  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   647  		}
   648  		if index > 0 {
   649  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   650  				t.Fatalf("state reconstruction failed: %v", err)
   651  			}
   652  		}
   653  	}*/
   654  }
   655  
   656  // Tests that simple synchronization against a canonical chain works correctly.
   657  // In this test common ancestor lookup should be short circuited and not require
    658  // In this test common ancestor lookup should be short-circuited and not require
   659  //func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   660  //func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   661  //func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   662  //func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   663  //func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   664  //
   665  //func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   666  //	t.Parallel()
   667  //
   668  //	tester := newTester()
   669  //	defer tester.terminate()
   670  //
   671  //	// Create a small enough block chain to download
   672  //	targetBlocks := blockCacheItems - 15
   673  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   674  //
   675  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   676  //
   677  //	// Synchronise with the peer and make sure all relevant data was retrieved
   678  //	if err := tester.sync("peer", nil, mode); err != nil {
   679  //		t.Fatalf("failed to synchronise blocks: %v", err)
   680  //	}
   681  //	assertOwnChain(t, tester, targetBlocks+1)
   682  //}
   683  //
    684  //// Tests that if a large batch of blocks is being downloaded, it is throttled
   685  //// until the cached blocks are retrieved.
   686  //func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   687  //func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   688  //func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   689  //func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   690  //func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   691  //
   692  //func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   693  //	t.Parallel()
   694  //	tester := newTester()
   695  //	defer tester.terminate()
   696  //
   697  //	// Create a long block chain to download and the tester
   698  //	targetBlocks := 8 * blockCacheItems
   699  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   700  //
   701  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   702  //
   703  //	// Wrap the importer to allow stepping
   704  //	blocked, proceed := uint32(0), make(chan struct{})
   705  //	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   706  //		atomic.StoreUint32(&blocked, uint32(len(results)))
   707  //		<-proceed
   708  //	}
   709  //	// Start a synchronisation concurrently
   710  //	errc := make(chan error)
   711  //	go func() {
   712  //		errc <- tester.sync("peer", nil, mode)
   713  //	}()
   714  //	// Iteratively take some blocks, always checking the retrieval count
   715  //	for {
   716  //		// Check the retrieval count synchronously (! reason for this ugly block)
   717  //		tester.lock.RLock()
   718  //		retrieved := len(tester.ownBlocks)
   719  //		tester.lock.RUnlock()
   720  //		if retrieved >= targetBlocks+1 {
   721  //			break
   722  //		}
   723  //		// Wait a bit for sync to throttle itself
   724  //		var cached, frozen int
   725  //		for start := time.Now(); time.Since(start) < 3*time.Second; {
   726  //			time.Sleep(25 * time.Millisecond)
   727  //
   728  //			tester.lock.Lock()
   729  //			tester.downloader.queue.lock.Lock()
   730  //			cached = len(tester.downloader.queue.blockDonePool)
   731  //			if mode == FastSync {
   732  //				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   733  //					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   734  //					cached = receipts
   735  //					//}
   736  //				}
   737  //			}
   738  //			frozen = int(atomic.LoadUint32(&blocked))
   739  //			retrieved = len(tester.ownBlocks)
   740  //			tester.downloader.queue.lock.Unlock()
   741  //			tester.lock.Unlock()
   742  //
   743  //			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
   744  //				break
   745  //			}
   746  //		}
   747  //		// Make sure we filled up the cache, then exhaust it
   748  //		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   749  //
   750  //		tester.lock.RLock()
   751  //		retrieved = len(tester.ownBlocks)
   752  //		tester.lock.RUnlock()
   753  //		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
   754  //			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   755  //		}
   756  //		// Permit the blocked blocks to import
   757  //		if atomic.LoadUint32(&blocked) > 0 {
   758  //			atomic.StoreUint32(&blocked, uint32(0))
   759  //			proceed <- struct{}{}
   760  //		}
   761  //	}
   762  //	// Check that we haven't pulled more blocks than available
   763  //	assertOwnChain(t, tester, targetBlocks+1)
   764  //	if err := <-errc; err != nil {
   765  //		t.Fatalf("block synchronization failed: %v", err)
   766  //	}
   767  //}
   768  //
   769  //// Tests that simple synchronization against a forked chain works correctly. In
    770  //// this test common ancestor lookup should *not* be short-circuited, and a full
   771  //// binary search should be executed.
   772  //func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   773  //func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   774  //func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   775  //func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   776  //func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   777  //func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   778  //
   779  //func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   780  //	t.Parallel()
   781  //
   782  //	tester := newTester()
   783  //	defer tester.terminate()
   784  //
   785  //	// Create a long enough forked chain
   786  //	common, fork := MaxHashFetch, 2*MaxHashFetch
   787  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   788  //
   789  //	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   790  //	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   791  //
   792  //	// Synchronise with the peer and make sure all blocks were retrieved
   793  //	if err := tester.sync("fork A", nil, mode); err != nil {
   794  //		t.Fatalf("failed to synchronise blocks: %v", err)
   795  //	}
   796  //	assertOwnChain(t, tester, common+fork+1)
   797  //
   798  //	// Synchronise with the second peer and make sure that fork is pulled too
   799  //	if err := tester.sync("fork B", nil, mode); err != nil {
   800  //		t.Fatalf("failed to synchronise blocks: %v", err)
   801  //	}
   802  //	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   803  //}
   804  //
    805  //// Tests that synchronising against a much shorter but much heavier fork works
    806  //// correctly and is not dropped.
   807  //func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   808  //func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   809  //func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   810  //func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   811  //func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   812  //func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   813  //
   814  //func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   815  //	t.Parallel()
   816  //
   817  //	tester := newTester()
   818  //	defer tester.terminate()
   819  //
   820  //	// Create a long enough forked chain
   821  //	common, fork := MaxHashFetch, 4*MaxHashFetch
   822  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   823  //
   824  //	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   825  //	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   826  //
   827  //	// Synchronise with the peer and make sure all blocks were retrieved
   828  //	if err := tester.sync("light", nil, mode); err != nil {
   829  //		t.Fatalf("failed to synchronise blocks: %v", err)
   830  //	}
   831  //	assertOwnChain(t, tester, common+fork+1)
   832  //
   833  //	// Synchronise with the second peer and make sure that fork is pulled too
   834  //	if err := tester.sync("heavy", nil, mode); err != nil {
   835  //		t.Fatalf("failed to synchronise blocks: %v", err)
   836  //	}
   837  //	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   838  //}
   839  //
   840  //// Tests that chain forks are contained within a certain interval of the current
   841  //// chain head, ensuring that malicious peers cannot waste resources by feeding
   842  //// long dead chains.
   843  //func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   844  //func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   845  //func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   846  //func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   847  //func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   848  //func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   849  //
   850  //func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   851  //	t.Parallel()
   852  //
   853  //	tester := newTester()
   854  //	defer tester.terminate()
   855  //
   856  //	// Create a long enough forked chain
   857  //	common, fork := 13, int(MaxForkAncestry+17)
   858  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   859  //
   860  //	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   861  //	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   862  //
   863  //	// Synchronise with the peer and make sure all blocks were retrieved
   864  //	if err := tester.sync("original", nil, mode); err != nil {
   865  //		t.Fatalf("failed to synchronise blocks: %v", err)
   866  //	}
   867  //	assertOwnChain(t, tester, common+fork+1)
   868  //
    869  //	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   870  //	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   871  //		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   872  //	}
   873  //}
   874  //
   875  //// Tests that chain forks are contained within a certain interval of the current
   876  //// chain head for short but heavy forks too. These are a bit special because they
   877  //// take different ancestor lookup paths.
   878  //func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   879  //func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   880  //func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   881  //func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   882  //func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   883  //func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   884  //
   885  //func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   886  //	t.Parallel()
   887  //
   888  //	tester := newTester()
   889  //	defer tester.terminate()
   890  //
   891  //	// Create a long enough forked chain
   892  //	common, fork := 13, int(MaxForkAncestry+17)
   893  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   894  //
   895  //	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   896  //	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   897  //
   898  //	// Synchronise with the peer and make sure all blocks were retrieved
   899  //	if err := tester.sync("original", nil, mode); err != nil {
   900  //		t.Fatalf("failed to synchronise blocks: %v", err)
   901  //	}
   902  //	assertOwnChain(t, tester, common+fork+1)
   903  //
    904  //	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   905  //	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   906  //		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   907  //	}
   908  //}
   909  //
   910  //// Tests that an inactive downloader will not accept incoming block headers and
   911  //// bodies.
   912  //func TestInactiveDownloader62(t *testing.T) {
   913  //	t.Parallel()
   914  //
   915  //	tester := newTester()
   916  //	defer tester.terminate()
   917  //
   918  //	// Check that neither block headers nor bodies are accepted
   919  //	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   920  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   921  //	}
   922  //	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   923  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   924  //	}
   925  //}
   926  //
   927  //// Tests that an inactive downloader will not accept incoming block headers,
   928  //// bodies and receipts.
   929  //func TestInactiveDownloader63(t *testing.T) {
   930  //	t.Parallel()
   931  //
   932  //	tester := newTester()
   933  //	defer tester.terminate()
   934  //
   935  //	// Check that neither block headers nor bodies are accepted
   936  //	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   937  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   938  //	}
   939  //	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   940  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   941  //	}
   942  //	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   943  //		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   944  //	}
   945  //}
   946  //
   947  //// Tests that a canceled download wipes all previously accumulated state.
   948  //func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   949  //func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   950  //func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   951  //func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   952  //func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   953  //func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   954  //
   955  //func testCancel(t *testing.T, protocol int, mode SyncMode) {
   956  //	t.Parallel()
   957  //
   958  //	tester := newTester()
   959  //	defer tester.terminate()
   960  //
   961  //	// Create a small enough block chain to download and the tester
   962  //	targetBlocks := blockCacheItems - 15
   963  //	if targetBlocks >= MaxHashFetch {
   964  //		targetBlocks = MaxHashFetch - 15
   965  //	}
   966  //	if targetBlocks >= MaxHeaderFetch {
   967  //		targetBlocks = MaxHeaderFetch - 15
   968  //	}
   969  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   970  //
   971  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   972  //
   973  //	// Make sure canceling works with a pristine downloader
   974  //	tester.downloader.Cancel()
   975  //	if !tester.downloader.queue.Idle() {
   976  //		t.Errorf("download queue not idle")
   977  //	}
   978  //	// Synchronise with the peer, but cancel afterwards
   979  //	if err := tester.sync("peer", nil, mode); err != nil {
   980  //		t.Fatalf("failed to synchronise blocks: %v", err)
   981  //	}
   982  //	tester.downloader.Cancel()
   983  //	if !tester.downloader.queue.Idle() {
   984  //		t.Errorf("download queue not idle")
   985  //	}
   986  //}
   987  //
   988  //// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   989  //func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   990  //func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   991  //func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   992  //func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   993  //func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   994  //func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   995  //
   996  //func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   997  //	t.Parallel()
   998  //
   999  //	tester := newTester()
  1000  //	defer tester.terminate()
  1001  //
  1002  //	// Create various peers with various parts of the chain
  1003  //	targetPeers := 8
  1004  //	targetBlocks := targetPeers*blockCacheItems - 15
  1005  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1006  //
  1007  //	for i := 0; i < targetPeers; i++ {
  1008  //		id := fmt.Sprintf("peer #%d", i)
  1009  //		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
  1010  //	}
  1011  //	if err := tester.sync("peer #0", nil, mode); err != nil {
  1012  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1013  //	}
  1014  //	assertOwnChain(t, tester, targetBlocks+1)
  1015  //}
  1016  //
   1017  //// Tests that synchronisations behave well in multi-version protocol environments
   1018  //// and do not wreak havoc on other nodes in the network.
  1019  //func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1020  //func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1021  //func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1022  //func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1023  //func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1024  //func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1025  //
  1026  //func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1027  //	t.Parallel()
  1028  //
  1029  //	tester := newTester()
  1030  //	defer tester.terminate()
  1031  //
  1032  //	// Create a small enough block chain to download
  1033  //	targetBlocks := blockCacheItems - 15
  1034  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1035  //
  1036  //	// Create peers of every type
  1037  //	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1038  //	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1039  //	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1040  //
  1041  //	// Synchronise with the requested peer and make sure all blocks were retrieved
  1042  //	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1043  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1044  //	}
  1045  //	assertOwnChain(t, tester, targetBlocks+1)
  1046  //
  1047  //	// Check that no peers have been dropped off
  1048  //	for _, version := range []int{62, 63, 64} {
  1049  //		peer := fmt.Sprintf("peer %d", version)
  1050  //		if _, ok := tester.peerHashes[peer]; !ok {
  1051  //			t.Errorf("%s dropped", peer)
  1052  //		}
  1053  //	}
  1054  //}
  1055  //
  1056  //// Tests that if a block is empty (e.g. header only), no body request should be
   1057  //// made, and instead the header should be assembled into a whole block by itself.
  1058  //func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1059  //func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1060  //func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1061  //func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1062  //func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1063  //func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1064  //
  1065  //func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1066  //	t.Parallel()
  1067  //
  1068  //	tester := newTester()
  1069  //	defer tester.terminate()
  1070  //
  1071  //	// Create a block chain to download
  1072  //	targetBlocks := 2*blockCacheItems - 15
  1073  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1074  //
  1075  //	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1076  //
  1077  //	// Instrument the downloader to signal body requests
  1078  //	bodiesHave, receiptsHave := int32(0), int32(0)
  1079  //	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1080  //		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1081  //	}
  1082  //	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1083  //		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1084  //	}
  1085  //	// Synchronise with the peer and make sure all blocks were retrieved
  1086  //	if err := tester.sync("peer", nil, mode); err != nil {
  1087  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1088  //	}
  1089  //	assertOwnChain(t, tester, targetBlocks+1)
  1090  //
  1091  //	// Validate the number of block bodies that should have been requested
  1092  //	bodiesNeeded, receiptsNeeded := 0, 0
  1093  //	for _, block := range blocks {
  1094  //		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1095  //			bodiesNeeded++
  1096  //		}
  1097  //	}
  1098  //	for _, receipt := range receipts {
  1099  //		if mode == FastSync && len(receipt) > 0 {
  1100  //			receiptsNeeded++
  1101  //		}
  1102  //	}
  1103  //	if int(bodiesHave) != bodiesNeeded {
  1104  //		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1105  //	}
  1106  //	if int(receiptsHave) != receiptsNeeded {
  1107  //		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1108  //	}
  1109  //}
  1110  //
  1111  //// Tests that headers are enqueued continuously, preventing malicious nodes from
  1112  //// stalling the downloader by feeding gapped header chains.
  1113  //func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1114  //func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1115  //func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1116  //func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1117  //func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1118  //func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1119  //
  1120  //func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1121  //	t.Parallel()
  1122  //
  1123  //	tester := newTester()
  1124  //	defer tester.terminate()
  1125  //
  1126  //	// Create a small enough block chain to download
  1127  //	targetBlocks := blockCacheItems - 15
  1128  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1129  //
  1130  //	// Attempt a full sync with an attacker feeding gapped headers
  1131  //	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1132  //	missing := targetBlocks / 2
  1133  //	delete(tester.peerHeaders["attack"], hashes[missing])
  1134  //
  1135  //	if err := tester.sync("attack", nil, mode); err == nil {
  1136  //		t.Fatalf("succeeded attacker synchronisation")
  1137  //	}
  1138  //	// Synchronise with the valid peer and make sure sync succeeds
  1139  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1140  //	if err := tester.sync("valid", nil, mode); err != nil {
  1141  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1142  //	}
  1143  //	assertOwnChain(t, tester, targetBlocks+1)
  1144  //}
  1145  //
  1146  //// Tests that if requested headers are shifted (i.e. first is missing), the queue
  1147  //// detects the invalid numbering.
  1148  //func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1149  //func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1150  //func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1151  //func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1152  //func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1153  //func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1154  //
  1155  //func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1156  //	t.Parallel()
  1157  //
  1158  //	tester := newTester()
  1159  //	defer tester.terminate()
  1160  //
  1161  //	// Create a small enough block chain to download
  1162  //	targetBlocks := blockCacheItems - 15
  1163  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1164  //
  1165  //	// Attempt a full sync with an attacker feeding shifted headers
  1166  //	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1167  //	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1168  //	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1169  //	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1170  //
  1171  //	if err := tester.sync("attack", nil, mode); err == nil {
  1172  //		t.Fatalf("succeeded attacker synchronisation")
  1173  //	}
  1174  //	// Synchronise with the valid peer and make sure sync succeeds
  1175  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1176  //	if err := tester.sync("valid", nil, mode); err != nil {
  1177  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1178  //	}
  1179  //	assertOwnChain(t, tester, targetBlocks+1)
  1180  //}
  1181  //
  1182  //// Tests that upon detecting an invalid header, the recent ones are rolled back
  1183  //// for various failure scenarios. Afterwards a full sync is attempted to make
  1184  //// sure no state was corrupted.
  1185  //func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1186  //func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1187  //func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1188  //
  1189  //func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1190  //	t.Parallel()
  1191  //
  1192  //	tester := newTester()
  1193  //	defer tester.terminate()
  1194  //
  1195  //	// Create a small enough block chain to download
  1196  //	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1197  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1198  //
  1199  //	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1200  //	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1201  //	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1202  //	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1203  //	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1204  //
  1205  //	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1206  //		t.Fatalf("succeeded fast attacker synchronisation")
  1207  //	}
  1208  //	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1209  //		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1210  //	}
  1211  //	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1212  //	// This should result in the last fsHeaderSafetyNet headers being rolled
  1213  //	// back, and the pivot point being reverted to a non-block status.
  1214  //	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1215  //	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1216  //	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1217  //	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1218  //
  1219  //	if err := tester.sync("block-attack", nil, mode); err == nil {
  1220  //		t.Fatalf("succeeded block attacker synchronisation")
  1221  //	}
  1222  //	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1223  //		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1224  //	}
  1225  //	if mode == FastSync {
  1226  //		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1227  //			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1228  //		}
  1229  //	}
  1230  //	// Attempt to sync with an attacker that withholds promised blocks after the
  1231  //	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1232  //	// but already imported pivot block.
  1233  //	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1234  //	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1235  //
  1236  //	tester.downloader.syncInitHook = func(uint64, uint64) {
  1237  //		for i := missing; i <= len(hashes); i++ {
  1238  //			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1239  //		}
  1240  //		tester.downloader.syncInitHook = nil
  1241  //	}
  1242  //
  1243  //	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1244  //		t.Fatalf("succeeded withholding attacker synchronisation")
  1245  //	}
  1246  //	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1247  //		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1248  //	}
  1249  //	if mode == FastSync {
  1250  //		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1251  //			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1252  //		}
  1253  //	}
  1254  //	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1255  //	// rollback should also disable fast syncing for this process, verify that we
  1256  //	// did a fresh full sync. Note, we can't assert anything about the receipts
  1257  //	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1258  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1259  //	if err := tester.sync("valid", nil, mode); err != nil {
  1260  //		t.Fatalf("failed to synchronise blocks: %v", err)
  1261  //	}
  1262  //	if hs := len(tester.ownHeaders); hs != len(headers) {
  1263  //		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1264  //	}
  1265  //	if mode != LightSync {
  1266  //		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1267  //			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1268  //		}
  1269  //	}
  1270  //}
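// Editor's note: an illustrative sketch, not part of the original suite. The
// withhold-attack above relies on a one-shot hook: a callback that mutates the
// peer's data the first time a sync starts and then removes itself, so later
// sync attempts see the data untouched. Hypothetical names throughout.
type sketchSyncer struct {
	initHook func() // invoked once at the start of a sync, if set
}

func (s *sketchSyncer) sync() {
	if s.initHook != nil {
		s.initHook()
	}
	// ... the actual synchronisation would run here ...
}

func sketchOneShotHook() {
	headers := map[string]bool{"h1": true, "h2": true}

	s := new(sketchSyncer)
	s.initHook = func() {
		delete(headers, "h2") // withhold data for this sync attempt only
		s.initHook = nil      // self-remove so the next sync is unaffected
	}
	s.sync() // runs with "h2" withheld
	s.sync() // runs with the hook already disabled
}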
  1271  //
  1272  //// Tests that a peer advertising a high TD doesn't get to stall the downloader
  1273  //// afterwards by not sending any useful hashes.
  1274  //func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1275  //func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1276  //func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1277  //func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1278  //func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1279  //func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1280  //
  1281  //func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1282  //	t.Parallel()
  1283  //
  1284  //	tester := newTester()
  1285  //	defer tester.terminate()
  1286  //
  1287  //	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1288  //	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1289  //
  1290  //	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1291  //		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1292  //	}
  1293  //}
  1294  //
  1295  //// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1296  //func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1297  //func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1298  //func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1299  //
  1300  //func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1301  //	t.Parallel()
  1302  //
  1303  //	// Define the disconnection requirement for individual hash fetch errors
  1304  //	tests := []struct {
  1305  //		result error
  1306  //		drop   bool
  1307  //	}{
  1308  //		{nil, false},                        // Sync succeeded, all is well
  1309  //		{errBusy, false},                    // Sync is already in progress, no problem
  1310  //		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1311  //		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1312  //		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1313  //		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1314  //		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1315  //		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1316  //		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1317  //		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1318  //		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1319  //		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1320  //		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1321  //		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1322  //		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1323  //		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1324  //		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1325  //		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1326  //		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1327  //		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1328  //	}
  1329  //	// Run the tests and check disconnection status
  1330  //	tester := newTester()
  1331  //	defer tester.terminate()
  1332  //
  1333  //	for i, tt := range tests {
  1334  //		// Register a new peer and ensure its presence
  1335  //		id := fmt.Sprintf("test %d", i)
  1336  //		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1337  //			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1338  //		}
  1339  //		if _, ok := tester.peerHashes[id]; !ok {
  1340  //			t.Fatalf("test %d: registered peer not found", i)
  1341  //		}
  1342  //		// Simulate a synchronisation and check the required result
  1343  //		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1344  //
  1345  //		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1346  //		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1347  //			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1348  //		}
  1349  //	}
  1350  //}
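// Editor's note: an illustrative sketch, not part of the original suite. The
// drop table above boils down to classifying sync errors as fatal (drop the
// peer) or benign (keep it) and asserting that the observed behaviour matches.
// The errors and names below are hypothetical stand-ins.
func sketchDropClassification() error {
	errStall := errors.New("stalling peer")
	errCancel := errors.New("sync canceled")

	// fatal lists the errors that warrant disconnecting the offending peer
	fatal := map[error]bool{errStall: true}

	tests := []struct {
		result error // error returned by a simulated sync
		drop   bool  // whether the peer should be dropped for it
	}{
		{nil, false},       // success, keep the peer
		{errStall, true},   // stalling is fatal, drop
		{errCancel, false}, // cancellation is benign, keep
	}
	for i, tt := range tests {
		if got := fatal[tt.result]; got != tt.drop {
			return fmt.Errorf("test %d: drop mismatch for %v: have %v, want %v", i, tt.result, got, tt.drop)
		}
	}
	return nil
}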
  1351  //
  1352  //// Tests that synchronisation progress (origin block number, current block number
  1353  //// and highest block number) is tracked and updated correctly.
  1354  //func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1355  //func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1356  //func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1357  //func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1358  //func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1359  //func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1360  //
  1361  //func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1362  //	t.Parallel()
  1363  //
  1364  //	tester := newTester()
  1365  //	defer tester.terminate()
  1366  //
  1367  //	// Create a small enough block chain to download
  1368  //	targetBlocks := blockCacheItems - 15
  1369  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1370  //
  1371  //	// Set a sync init hook to catch progress changes
  1372  //	starting := make(chan struct{})
  1373  //	progress := make(chan struct{})
  1374  //
  1375  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1376  //		starting <- struct{}{}
  1377  //		<-progress
  1378  //	}
  1379  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1380  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1381  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1382  //	}
  1383  //	// Synchronise half the blocks and check initial progress
  1384  //	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1385  //	pending := new(sync.WaitGroup)
  1386  //	pending.Add(1)
  1387  //
  1388  //	go func() {
  1389  //		defer pending.Done()
  1390  //		if err := tester.sync("peer-half", nil, mode); err != nil {
  1391  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1392  //		}
  1393  //	}()
  1394  //	<-starting
  1395  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1396  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1397  //	}
  1398  //	progress <- struct{}{}
  1399  //	pending.Wait()
  1400  //
  1401  //	// Synchronise all the blocks and check continuation progress
  1402  //	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1403  //	pending.Add(1)
  1404  //
  1405  //	go func() {
  1406  //		defer pending.Done()
  1407  //		if err := tester.sync("peer-full", nil, mode); err != nil {
  1408  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1409  //		}
  1410  //	}()
  1411  //	<-starting
  1412  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1413  //		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1414  //	}
  1415  //	progress <- struct{}{}
  1416  //	pending.Wait()
  1417  //
  1418  //	// Check final progress after successful sync
  1419  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1420  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1421  //	}
  1422  //}
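// Editor's note: an illustrative sketch, not part of the original suite. All
// the progress tests above share one checkpoint idiom: syncInitHook parks the
// syncing goroutine on unbuffered channels so Progress() can be sampled at a
// deterministic moment, then releases it. Distilled (hypothetical names):
func sketchCheckpointIdiom() {
	starting := make(chan struct{}) // worker announces it reached the checkpoint
	progress := make(chan struct{}) // test releases the worker afterwards

	var pending sync.WaitGroup
	pending.Add(1)
	go func() {
		defer pending.Done()
		starting <- struct{}{} // rendezvous: block until the test observed state
		<-progress             // wait for permission to continue
		// ... the work that advances the observed state runs here ...
	}()

	<-starting             // worker is parked; sample the state here, race-free
	progress <- struct{}{} // let the worker finish
	pending.Wait()
}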
  1423  //
  1424  //// Tests that synchronisation progress (origin block number and highest block
  1425  //// number) is tracked and updated correctly in case of a fork (or manual head
  1426  //// reversal).
  1427  //func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1428  //func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1429  //func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1430  //func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1431  //func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1432  //func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1433  //
  1434  //func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1435  //	t.Parallel()
  1436  //
  1437  //	tester := newTester()
  1438  //	defer tester.terminate()
  1439  //
  1440  //	// Create a forked chain to simulate origin reversal
  1441  //	common, fork := MaxHashFetch, 2*MaxHashFetch
  1442  //	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1443  //
  1444  //	// Set a sync init hook to catch progress changes
  1445  //	starting := make(chan struct{})
  1446  //	progress := make(chan struct{})
  1447  //
  1448  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1449  //		starting <- struct{}{}
  1450  //		<-progress
  1451  //	}
  1452  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1453  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1454  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1455  //	}
  1456  //	// Synchronise with one of the forks and check progress
  1457  //	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1458  //	pending := new(sync.WaitGroup)
  1459  //	pending.Add(1)
  1460  //
  1461  //	go func() {
  1462  //		defer pending.Done()
  1463  //		if err := tester.sync("fork A", nil, mode); err != nil {
  1464  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1465  //		}
  1466  //	}()
  1467  //	<-starting
  1468  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1469  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1470  //	}
  1471  //	progress <- struct{}{}
  1472  //	pending.Wait()
  1473  //
  1474  //	// Simulate a successful sync above the fork
  1475  //	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1476  //
  1477  //	// Synchronise with the second fork and check progress resets
  1478  //	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1479  //	pending.Add(1)
  1480  //
  1481  //	go func() {
  1482  //		defer pending.Done()
  1483  //		if err := tester.sync("fork B", nil, mode); err != nil {
  1484  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1485  //		}
  1486  //	}()
  1487  //	<-starting
  1488  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1489  //		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1490  //	}
  1491  //	progress <- struct{}{}
  1492  //	pending.Wait()
  1493  //
  1494  //	// Check final progress after successful sync
  1495  //	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1496  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1497  //	}
  1498  //}
  1499  //
  1500  //// Tests that if synchronisation is aborted due to some failure, then the progress
  1501  //// origin is not updated in the next sync cycle, as it should be considered the
  1502  //// continuation of the previous sync and not a new instance.
  1503  //func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1504  //func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1505  //func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1506  //func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1507  //func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1508  //func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1509  //
  1510  //func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1511  //	t.Parallel()
  1512  //
  1513  //	tester := newTester()
  1514  //	defer tester.terminate()
  1515  //
  1516  //	// Create a small enough block chain to download
  1517  //	targetBlocks := blockCacheItems - 15
  1518  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1519  //
  1520  //	// Set a sync init hook to catch progress changes
  1521  //	starting := make(chan struct{})
  1522  //	progress := make(chan struct{})
  1523  //
  1524  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1525  //		starting <- struct{}{}
  1526  //		<-progress
  1527  //	}
  1528  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1529  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1530  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1531  //	}
  1532  //	// Attempt a full sync with a faulty peer
  1533  //	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1534  //	missing := targetBlocks / 2
  1535  //	delete(tester.peerHeaders["faulty"], hashes[missing])
  1536  //	delete(tester.peerBlocks["faulty"], hashes[missing])
  1537  //	delete(tester.peerReceipts["faulty"], hashes[missing])
  1538  //
  1539  //	pending := new(sync.WaitGroup)
  1540  //	pending.Add(1)
  1541  //
  1542  //	go func() {
  1543  //		defer pending.Done()
  1544  //		if err := tester.sync("faulty", nil, mode); err == nil {
  1545  //			panic("succeeded faulty synchronisation")
  1546  //		}
  1547  //	}()
  1548  //	<-starting
  1549  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1550  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1551  //	}
  1552  //	progress <- struct{}{}
  1553  //	pending.Wait()
  1554  //
  1555  //	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1556  //	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1557  //	pending.Add(1)
  1558  //
  1559  //	go func() {
  1560  //		defer pending.Done()
  1561  //		if err := tester.sync("valid", nil, mode); err != nil {
  1562  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1563  //		}
  1564  //	}()
  1565  //	<-starting
  1566  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1567  //		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1568  //	}
  1569  //	progress <- struct{}{}
  1570  //	pending.Wait()
  1571  //
  1572  //	// Check final progress after successful sync
  1573  //	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1574  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1575  //	}
  1576  //}
  1577  //
  1578  //// Tests that if an attacker fakes a chain height, after the attack is detected,
  1579  //// the progress height is successfully reduced at the next sync invocation.
  1580  //func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1581  //func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1582  //func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1583  //func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1584  //func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1585  //func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1586  //
  1587  //func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1588  //	t.Parallel()
  1589  //
  1590  //	tester := newTester()
  1591  //	defer tester.terminate()
  1592  //
  1593  //	// Create a small block chain
  1594  //	targetBlocks := blockCacheItems - 15
  1595  //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1596  //
  1597  //	// Set a sync init hook to catch progress changes
  1598  //	starting := make(chan struct{})
  1599  //	progress := make(chan struct{})
  1600  //
  1601  //	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1602  //		starting <- struct{}{}
  1603  //		<-progress
  1604  //	}
  1605  //	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1606  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1607  //		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1608  //	}
  1609  //	// Create and sync with an attacker that promises a higher chain than available
  1610  //	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1611  //	for i := 1; i < 3; i++ {
  1612  //		delete(tester.peerHeaders["attack"], hashes[i])
  1613  //		delete(tester.peerBlocks["attack"], hashes[i])
  1614  //		delete(tester.peerReceipts["attack"], hashes[i])
  1615  //	}
  1616  //
  1617  //	pending := new(sync.WaitGroup)
  1618  //	pending.Add(1)
  1619  //
  1620  //	go func() {
  1621  //		defer pending.Done()
  1622  //		if err := tester.sync("attack", nil, mode); err == nil {
  1623  //			panic("succeeded attacker synchronisation")
  1624  //		}
  1625  //	}()
  1626  //	<-starting
  1627  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1628  //		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1629  //	}
  1630  //	progress <- struct{}{}
  1631  //	pending.Wait()
  1632  //
  1633  //	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1634  //	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1635  //	pending.Add(1)
  1636  //
  1637  //	go func() {
  1638  //		defer pending.Done()
  1639  //		if err := tester.sync("valid", nil, mode); err != nil {
  1640  //			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1641  //		}
  1642  //	}()
  1643  //	<-starting
  1644  //	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1645  //		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1646  //	}
  1647  //	progress <- struct{}{}
  1648  //	pending.Wait()
  1649  //
  1650  //	// Check final progress after successful sync
  1651  //	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1652  //		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1653  //	}
  1654  //}
  1655  //
  1656  //// This test reproduces an issue where unexpected deliveries would
  1657  //// block indefinitely if they arrived at the right time.
  1658  //// We use data-driven subtests so that the whole group runs in parallel on its
  1659  //// own and not alongside the other tests, avoiding intermittent failures.
  1660  //func TestDeliverHeadersHang(t *testing.T) {
  1661  //	testCases := []struct {
  1662  //		protocol int
  1663  //		syncMode SyncMode
  1664  //	}{
  1665  //		{62, FullSync},
  1666  //		{63, FullSync},
  1667  //		{63, FastSync},
  1668  //		{64, FullSync},
  1669  //		{64, FastSync},
  1670  //		{64, LightSync},
  1671  //	}
  1672  //	for _, tc := range testCases {
  1673  //		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1674  //			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1675  //		})
  1676  //	}
  1677  //}
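// Editor's note: an illustrative sketch, not part of the original suite,
// showing the data-driven subtest pattern used above: t.Run names each case,
// and the group can be parallelised on its own, independently of the other
// top-level tests. The cases below are hypothetical.
func sketchSubtests(t *testing.T) {
	cases := []struct {
		protocol int
		mode     string
	}{
		{62, "full"},
		{63, "fast"},
	}
	for _, tc := range cases {
		t.Run(fmt.Sprintf("protocol %d mode %s", tc.protocol, tc.mode), func(t *testing.T) {
			// failures report the generated name, e.g. "protocol 62 mode full"
			if tc.protocol < 62 {
				t.Fatalf("unsupported protocol %d", tc.protocol)
			}
		})
	}
}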
  1678  //
  1679  //type floodingTestPeer struct {
  1680  //	peer   Peer
  1681  //	tester *downloadTester
  1682  //	pend   sync.WaitGroup
  1683  //}
  1684  //
  1685  //func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1686  //func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1687  //	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1688  //}
  1689  //func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1690  //	return ftp.peer.RequestBodies(hashes)
  1691  //}
  1692  //func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1693  //	return ftp.peer.RequestReceipts(hashes)
  1694  //}
  1695  //func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1696  //	return ftp.peer.RequestNodeData(hashes)
  1697  //}
  1698  //
  1699  //func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1700  //	deliveriesDone := make(chan struct{}, 500)
  1701  //	for i := 0; i < cap(deliveriesDone); i++ {
  1702  //		peer := fmt.Sprintf("fake-peer%d", i)
  1703  //		ftp.pend.Add(1)
  1704  //
  1705  //		go func() {
  1706  //			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1707  //			deliveriesDone <- struct{}{}
  1708  //			ftp.pend.Done()
  1709  //		}()
  1710  //	}
  1711  //	// Deliver the actual requested headers.
  1712  //	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1713  //	// None of the extra deliveries should block.
  1714  //	timeout := time.After(60 * time.Second)
  1715  //	for i := 0; i < cap(deliveriesDone); i++ {
  1716  //		select {
  1717  //		case <-deliveriesDone:
  1718  //		case <-timeout:
  1719  //			panic("blocked")
  1720  //		}
  1721  //	}
  1722  //	return nil
  1723  //}
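// Editor's note: an illustrative sketch, not part of the original suite. The
// flooding peer above asserts liveness with a counting loop over one shared
// deadline: every unsolicited delivery must complete before time.After fires.
// Distilled (hypothetical names):
func sketchFloodTimeout() error {
	done := make(chan struct{}, 500) // one slot per flooding goroutine
	for i := 0; i < cap(done); i++ {
		go func() {
			// ... an unsolicited delivery would be attempted here ...
			done <- struct{}{} // report completion; must never block forever
		}()
	}
	timeout := time.After(60 * time.Second) // single deadline shared by all waits
	for i := 0; i < cap(done); i++ {
		select {
		case <-done:
		case <-timeout:
			return errors.New("delivery blocked") // some goroutine never finished
		}
	}
	return nil
}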
  1724  //
  1725  //func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1726  //	t.Parallel()
  1727  //
  1728  //	master := newTester()
  1729  //	defer master.terminate()
  1730  //
  1731  //	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1732  //	for i := 0; i < 200; i++ {
  1733  //		tester := newTester()
  1734  //		tester.peerDb = master.peerDb
  1735  //
  1736  //		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1737  //		// Whenever the downloader requests headers, flood it with
  1738  //		// a lot of unrequested header deliveries.
  1739  //		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1740  //			peer:   tester.downloader.peers.peers["peer"].peer,
  1741  //			tester: tester,
  1742  //		}
  1743  //		if err := tester.sync("peer", nil, mode); err != nil {
  1744  //			t.Errorf("test %d: sync failed: %v", i, err)
  1745  //		}
  1746  //		tester.terminate()
  1747  //
  1748  //		// Flush all goroutines to prevent messing with subsequent tests
  1749  //		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1750  //	}
  1751  //}