github.com/r8d8/go-ethereum@v5.5.2+incompatible/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !deterministic

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereumproject/go-ethereum/common"
	"github.com/ethereumproject/go-ethereum/core"
	"github.com/ethereumproject/go-ethereum/core/types"
	"github.com/ethereumproject/go-ethereum/crypto"
	"github.com/ethereumproject/go-ethereum/ethdb"
	"github.com/ethereumproject/go-ethereum/event"
	"github.com/ethereumproject/go-ethereum/logger/glog"
	"github.com/ethereumproject/go-ethereum/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = 10000
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
	glog.SetD(0)
	glog.SetV(0)
}

// downloadTester is a test simulator for mocking out a local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis blocks used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb, _ := ethdb.NewMemDatabase()
	genesis := core.WriteGenesisBlockForTesting(testdb, core.GenesisAccount{Address: testAddress, Balance: big.NewInt(1000000000)})
	//genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb, _ = ethdb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)

	return tester
}
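
// Illustrative usage sketch (not part of the original suite): the helpers in
// this file compose in the pattern the actual tests below follow, e.g.
//
//	tester := newTester()
//	defer tester.terminate()
//	hashes, headers, blocks, receipts := tester.makeChain(10, 0, tester.genesis, nil, false)
//	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		// the chain failed to synchronise
//	}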

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(core.DefaultConfigMorden.ChainConfig, parent, dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), core.TxGas, nil, nil).SignECDSA(testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash: block.PrevBlock(i - 1).Hash(),
				Number:     big.NewInt(block.Number().Int64() - 1),
			})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
	}
	return hashes, headerm, blockm, receiptm
}

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if non-balanced forks were requested
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := false
	if !balanced {
		heavy = true
	}
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}
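
// Illustrative note (not part of the original suite): the two returned hash
// chains share the n-f block suffix built first and diverge for the first f
// entries, roughly:
//
//	h1: [fork A: f blocks] -> [common: n-f blocks] -> genesis
//	h2: [fork B: f blocks] -> [common: n-f blocks] -> genesis
//
// Both slices are ordered head->parent, so h1[f:] and h2[f:] are identical.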

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasBlockAndState checks if a block and its associated state are present in the tester's canonical chain.
func (dl *downloadTester) HasBlockAndState(hash common.Hash) bool {
	block := dl.GetBlockByHash(hash)
	if block == nil {
		return false
	}
	_, err := dl.stateDb.Get(block.Root().Bytes())
	return err == nil
}
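
// Illustrative note (not part of the original suite): state availability is
// modelled with a sentinel entry rather than a real trie; newTester and
// InsertChain store the placeholder byte 0x00 under each block's state root,
// so the raw key lookup above is all "has state" needs to mean here.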

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), dl.stateDb, 0)
		return err
	}
	return fmt.Errorf("non-existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (res *core.HeaderChainInsertResult) {
	res = &core.HeaderChainInsertResult{}

	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		res.Error = errors.New("unknown parent")
		return
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			res.Error = errors.New("unknown parent")
			res.Index = i
			return
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			res.Index = i
			res.Error = errors.New("unknown parent")
			return
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	res.Index = len(headers)
	return
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (res *core.ChainInsertResult) {
	res = &core.ChainInsertResult{}

	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			res.Index = i
			res.Error = errors.New("unknown parent")
			return
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			res.Index = i
			res.Error = fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
			return
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	res.Index = len(blocks)
	return
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (res *core.ReceiptChainInsertResult) {
	res = &core.ReceiptChainInsertResult{}
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			res.Index = i
			res.Error = errors.New("unknown owner")
			return
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			res.Index = i
			res.Error = errors.New("unknown parent")
			return
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	res.Index = len(blocks)
	return
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	name := "slow peer"
	p := &downloadTesterPeer{
		id:    id,
		dl:    dl,
		delay: delay,
	}
	var err error
	switch version {
	case 62:
		err = dl.downloader.RegisterPeer(id, version, name, p.Head, p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, nil, nil)
	case 63:
		err = dl.downloader.RegisterPeer(id, version, name, p.Head, p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData)
	case 64:
		err = dl.downloader.RegisterPeer(id, version, name, p.Head, p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData)
	}
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
			dl.peerChainTds[id][genesis] = header.Difficulty
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
			dl.peerChainTds[id][genesis] = block.Difficulty()
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
		}
	}
	return err
}
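
// Illustrative usage sketch (not part of the original suite): a test can
// register a peer whose responses lag by a fixed delay, e.g.
//
//	tester.newSlowPeer("slow", 63, hashes, headers, blocks, receipts, 50*time.Millisecond)
//
// which exercises the downloader's timeout and throttling paths.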

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	delay time.Duration
	lock  sync.RWMutex
}

// setDelay is a thread safe setter for the network delay value.
func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
	dlp.lock.Lock()
	defer dlp.lock.Unlock()

	dlp.delay = delay
}

// waitDelay is a thread safe way to sleep for the configured time.
func (dlp *downloadTesterPeer) waitDelay() {
	dlp.lock.RLock()
	delay := dlp.delay
	dlp.lock.RUnlock()

	time.Sleep(delay)
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	return dlp.dl.peerHashes[dlp.id][0], nil
}

// RequestHeadersByHash serves a GetBlockHeaders request for a hash-based
// origin, retrieving a batch of headers from the peer this method is
// associated with in the download tester.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	// Find the canonical number of the hash
	dlp.dl.lock.RLock()
	number := uint64(0)
	for num, hash := range dlp.dl.peerHashes[dlp.id] {
		if hash == origin {
			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
			break
		}
	}
	dlp.dl.lock.RUnlock()

	// Use the absolute header fetcher to satisfy the query
	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
}

// RequestHeadersByNumber serves a GetBlockHeaders request for a numbered
// origin, retrieving a batch of headers from the peer this method is
// associated with in the download tester.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	// Gather the next batch of headers
	hashes := dlp.dl.peerHashes[dlp.id]
	headers := dlp.dl.peerHeaders[dlp.id]
	result := make([]*types.Header, 0, amount)
	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
			result = append(result, header)
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	go func() {
		time.Sleep(time.Millisecond)
		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	}()
	return nil
}
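
// Illustrative note (not part of the original suite): peer hash chains are
// ordered head->genesis, so block number n of a chain with len(hashes) == L
// lives at hashes[L-n-1]. For example, with L = 11 a request for origin 0,
// amount 3, skip 0 walks indices 10, 9, 8, i.e. genesis and its two children.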

// RequestBodies serves a getBlockBodies request, retrieving a batch of block
// bodies from the peer this method is associated with in the download tester.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	blocks := dlp.dl.peerBlocks[dlp.id]

	transactions := make([][]*types.Transaction, 0, len(hashes))
	uncles := make([][]*types.Header, 0, len(hashes))

	for _, hash := range hashes {
		if block, ok := blocks[hash]; ok {
			transactions = append(transactions, block.Transactions())
			uncles = append(uncles, block.Uncles())
		}
	}
	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)

	return nil
}

// RequestReceipts serves a getReceipts request, retrieving a batch of block
// receipts from the peer this method is associated with in the download tester.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	receipts := dlp.dl.peerReceipts[dlp.id]

	results := make([][]*types.Receipt, 0, len(hashes))
	for _, hash := range hashes {
		if receipt, ok := receipts[hash]; ok {
			results = append(results, receipt)
		}
	}
	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)

	return nil
}

// RequestNodeData serves a getNodeData request, retrieving a batch of state
// trie node data from the peer this method is associated with in the download tester.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.dl.peerMissingStates[dlp.id][hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)

	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks

	if receipts < 0 {
		receipts = 1
	}
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common - fsMinFullBlocks
	}
	switch tester.downloader.mode {
	case FullSync:
		receipts = 1
	case LightSync:
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
	// Verify the state trie too for fast syncs
	/*if tester.downloader.mode == FastSync {
		pivot := uint64(0)
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}*/
}
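
// Illustrative worked example (not part of the original suite): with
// common = 51 and lengths = [101, 101], the counters above evaluate to
// headers = blocks = 101 + (101-51) = 151, and for fast sync
// receipts = (101 - fsMinFullBlocks) + (101 - 51 - fsMinFullBlocks);
// full sync forces receipts to 1 and light sync forces blocks and receipts to 1.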

// Tests that simple synchronization against a canonical chain works correctly.
// In this test the common ancestor lookup should be short-circuited and not
// require binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheItems
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
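	// Illustrative note (not part of the original test): the hook above parks
	// the importer on the proceed channel, so fetched blocks accumulate in the
	// download queue until the loop below releases them; this is what makes
	// the throttling observable.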
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
					cached = receipts
					//}
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test the common ancestor lookup should *not* be short-circuited, and a
// full binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheItems - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Create peers of every type
	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	targetBlocks := 2*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range receipts {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["attack"], hashes[missing])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

  1197  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1198  // detects the invalid numbering.
  1199  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1200  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1201  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1202  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1203  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1204  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1205  
  1206  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1207  	t.Parallel()
  1208  
  1209  	tester := newTester()
  1210  	defer tester.terminate()
  1211  
  1212  	// Create a small enough block chain to download
  1213  	targetBlocks := blockCacheItems - 15
  1214  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1215  
  1216  	// Attempt a full sync with an attacker feeding shifted headers
  1217  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
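        	// hashes[len(hashes)-2] is block #1 (hashes are ordered newest first), so the
        	// peer's header responses all start one block late and carry shifted numbers.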
  1218  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1219  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1220  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1221  
  1222  	if err := tester.sync("attack", nil, mode); err == nil {
  1223  		t.Fatalf("succeeded attacker synchronisation")
  1224  	}
  1225  	// Synchronise with the valid peer and make sure sync succeeds
  1226  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1227  	if err := tester.sync("valid", nil, mode); err != nil {
  1228  		t.Fatalf("failed to synchronise blocks: %v", err)
  1229  	}
  1230  	assertOwnChain(t, tester, targetBlocks+1)
  1231  }
  1232  
  1233  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1234  // for various failure scenarios. Afterwards a full sync is attempted to make
  1235  // sure no state was corrupted.
  1236  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1237  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1238  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1239  
  1240  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1241  	t.Parallel()
  1242  
  1243  	tester := newTester()
  1244  	defer tester.terminate()
  1245  
  1246  	// Create a small enough block chain to download
  1247  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
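        	// Size the chain so that several fsHeaderSafetyNet rollbacks fit with room to
        	// spare, plus a tail of fsMinFullBlocks full blocks past the fast sync pivot.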
  1248  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1249  
  1250  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1251  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1252  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1253  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1254  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1255  
  1256  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1257  		t.Fatalf("succeeded fast attacker synchronisation")
  1258  	}
  1259  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1260  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1261  	}
  1262  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1263  	// This should result in the last fsHeaderSafetyNet headers being rolled back,
  1264  	// and also the pivot point being reverted to a non-block status.
  1265  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1266  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1267  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1268  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1269  
  1270  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1271  		t.Fatalf("succeeded block attacker synchronisation")
  1272  	}
  1273  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1274  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1275  	}
  1276  	if mode == FastSync {
  1277  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1278  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1279  		}
  1280  	}
  1281  	// Attempt to sync with an attacker that withholds promised blocks after the
  1282  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1283  	// but already imported pivot block.
  1284  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1285  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
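        	// The headers are only removed once syncing begins (via syncInitHook below), so
        	// the attacker first advertises the full chain and then withholds its newest part.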
  1286  
  1287  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1288  		for i := missing; i <= len(hashes); i++ {
  1289  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1290  		}
  1291  		tester.downloader.syncInitHook = nil
  1292  	}
  1293  
  1294  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1295  		t.Fatalf("succeeded withholding attacker synchronisation")
  1296  	}
  1297  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1298  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1299  	}
  1300  	if mode == FastSync {
  1301  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1302  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1303  		}
  1304  	}
  1305  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1306  	// rollback should also disable fast syncing for this process, verify that we
  1307  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1308  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1309  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1310  	if err := tester.sync("valid", nil, mode); err != nil {
  1311  		t.Fatalf("failed to synchronise blocks: %v", err)
  1312  	}
  1313  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1314  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1315  	}
  1316  	if mode != LightSync {
  1317  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1318  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1319  		}
  1320  	}
  1321  }
  1322  
  1323  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1324  // afterwards by not sending any useful hashes.
  1325  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1326  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1327  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1328  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1329  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1330  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1331  
  1332  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1333  	t.Parallel()
  1334  
  1335  	tester := newTester()
  1336  	defer tester.terminate()
  1337  
  1338  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1339  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
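        	// The attacker advertises only the genesis hash, so despite the huge TD claimed
        	// below it can never deliver anything useful and must be flagged as stalling.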
  1340  
  1341  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1342  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1343  	}
  1344  }
  1345  
  1346  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1347  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1348  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1349  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1350  
  1351  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1352  	t.Parallel()
  1353  
  1354  	// Define the disconnection requirement for individual hash fetch errors
  1355  	tests := []struct {
  1356  		result error
  1357  		drop   bool
  1358  	}{
  1359  		{nil, false},                        // Sync succeeded, all is well
  1360  		{errBusy, false},                    // Sync is already in progress, no problem
  1361  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1362  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1363  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1364  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1365  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1366  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1367  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1368  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1369  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1370  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1371  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1372  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1373  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1374  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1375  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1376  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1377  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1378  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1379  	}
  1380  	// Run the tests and check disconnection status
  1381  	tester := newTester()
  1382  	defer tester.terminate()
  1383  
  1384  	for i, tt := range tests {
  1385  		// Register a new peer and ensure its presence
  1386  		id := fmt.Sprintf("test %d", i)
  1387  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1388  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1389  		}
  1390  		if _, ok := tester.peerHashes[id]; !ok {
  1391  			t.Fatalf("test %d: registered peer not found", i)
  1392  		}
  1393  		// Simulate a synchronisation and check the required result
  1394  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1395  
  1396  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1397  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1398  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1399  		}
  1400  	}
  1401  }
  1402  
  1403  // Tests that synchronisation progress (origin block number, current block number
  1404  // and highest block number) is tracked and updated correctly.
  1405  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1406  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1407  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1408  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1409  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1410  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1411  
  1412  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1413  	t.Parallel()
  1414  
  1415  	tester := newTester()
  1416  	defer tester.terminate()
  1417  
  1418  	// Create a small enough block chain to download
  1419  	targetBlocks := blockCacheItems - 15
  1420  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1421  
  1422  	// Set a sync init hook to catch progress changes
  1423  	starting := make(chan struct{})
  1424  	progress := make(chan struct{})
  1425  
  1426  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1427  		starting <- struct{}{}
  1428  		<-progress
  1429  	}
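        	// The hook parks the downloader right after initialisation, so Progress() can
        	// be sampled at a deterministic point; the progress channel releases it again.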
  1430  	// Retrieve the sync progress and ensure all reported values are zero (pristine sync)
  1431  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != 0 {
  1432  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, 0)
  1433  	}
  1434  	// Synchronise half the blocks and check initial progress
  1435  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1436  	pending := new(sync.WaitGroup)
  1437  	pending.Add(1)
  1438  
  1439  	go func() {
  1440  		defer pending.Done()
  1441  		if err := tester.sync("peer-half", nil, mode); err != nil {
        			// t.Fatal must only be called from the goroutine running the test, so
        			// panic here instead, like the other sync goroutines in this file.
  1442  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1443  		}
  1444  	}()
  1445  	<-starting
  1446  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != uint64(targetBlocks/2+1) {
  1447  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, targetBlocks/2+1)
  1448  	}
  1449  	progress <- struct{}{}
  1450  	pending.Wait()
  1451  
  1452  	// Synchronise all the blocks and check continuation progress
  1453  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1454  	pending.Add(1)
  1455  
  1456  	go func() {
  1457  		defer pending.Done()
  1458  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1459  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1460  		}
  1461  	}()
  1462  	<-starting
  1463  	if start, current, height, _, _ := tester.downloader.Progress(); start != uint64(targetBlocks/2+1) || current != uint64(targetBlocks/2+1) || height != uint64(targetBlocks) {
  1464  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1465  	}
  1466  	progress <- struct{}{}
  1467  	pending.Wait()
  1468  
  1469  	// Check final progress after successful sync
  1470  	if start, current, height, _, _ := tester.downloader.Progress(); start != uint64(targetBlocks/2+1) || current != uint64(targetBlocks) || height != uint64(targetBlocks) {
  1471  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, targetBlocks/2+1, targetBlocks, targetBlocks)
  1472  	}
  1473  }
  1474  
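        // checkProgress is a minimal sketch of a helper that could collapse the
        // repeated Progress() assertions in the surrounding tests; the name, the
        // stage label and the (origin, current, height) ordering are assumptions
        // mirroring the call sites above, and nothing uses it yet.
        func checkProgress(t *testing.T, d *Downloader, stage string, origin, current, height uint64) {
        	haveOrigin, haveCurrent, haveHeight, _, _ := d.Progress()
        	if haveOrigin != origin || haveCurrent != current || haveHeight != height {
        		t.Fatalf("%s progress mismatch: have %v/%v/%v, want %v/%v/%v",
        			stage, haveOrigin, haveCurrent, haveHeight, origin, current, height)
        	}
        }
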
  1475  // Tests that synchronisation progress (origin block number and highest block
  1476  // number) is tracked and updated correctly in case of a fork (or manual head
  1477  // reversion).
  1478  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1479  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1480  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1481  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1482  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1483  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1484  
  1485  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1486  	t.Parallel()
  1487  
  1488  	tester := newTester()
  1489  	defer tester.terminate()
  1490  
  1491  	// Create a forked chain to simulate an origin reversion
  1492  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1493  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
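        	// Both forks share the first `common` blocks and then diverge for `fork`
        	// blocks, so the common ancestor sits exactly at block `common`.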
  1494  
  1495  	// Set a sync init hook to catch progress changes
  1496  	starting := make(chan struct{})
  1497  	progress := make(chan struct{})
  1498  
  1499  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1500  		starting <- struct{}{}
  1501  		<-progress
  1502  	}
  1503  	// Retrieve the sync progress and ensure all reported values are zero (pristine sync)
  1504  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != 0 {
  1505  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, 0)
  1506  	}
  1507  	// Synchronise with one of the forks and check progress
  1508  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1509  	pending := new(sync.WaitGroup)
  1510  	pending.Add(1)
  1511  
  1512  	go func() {
  1513  		defer pending.Done()
  1514  		if err := tester.sync("fork A", nil, mode); err != nil {
  1515  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1516  		}
  1517  	}()
  1518  	<-starting
  1519  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != uint64(len(hashesA)-1) {
  1520  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, len(hashesA)-1)
  1521  	}
  1522  	progress <- struct{}{}
  1523  	pending.Wait()
  1524  
  1525  	// Simulate a successful sync above the fork
  1526  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1527  
  1528  	// Synchronise with the second fork and check progress resets
  1529  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1530  	pending.Add(1)
  1531  
  1532  	go func() {
  1533  		defer pending.Done()
  1534  		if err := tester.sync("fork B", nil, mode); err != nil {
  1535  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1536  		}
  1537  	}()
  1538  	<-starting
  1539  	if start, current, height, _, _ := tester.downloader.Progress(); start != uint64(common) || current != uint64(len(hashesA)-1) || height != uint64(len(hashesB)-1) {
  1540  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, common, len(hashesA)-1, len(hashesB)-1)
  1541  	}
  1542  	progress <- struct{}{}
  1543  	pending.Wait()
  1544  
  1545  	// Check final progress after successful sync
  1546  	if start, current, height, _, _ := tester.downloader.Progress(); start != uint64(common) || current != uint64(len(hashesB)-1) || height != uint64(len(hashesB)-1) {
  1547  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, common, len(hashesB)-1, len(hashesB)-1)
  1548  	}
  1549  }
  1550  
  1551  // Tests that if synchronisation is aborted due to some failure, then the progress
  1552  // origin is not updated in the next sync cycle, as it should be considered the
  1553  // continuation of the previous sync and not a new instance.
  1554  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1555  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1556  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1557  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1558  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1559  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1560  
  1561  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1562  	t.Parallel()
  1563  
  1564  	tester := newTester()
  1565  	defer tester.terminate()
  1566  
  1567  	// Create a small enough block chain to download
  1568  	targetBlocks := blockCacheItems - 15
  1569  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1570  
  1571  	// Set a sync init hook to catch progress changes
  1572  	starting := make(chan struct{})
  1573  	progress := make(chan struct{})
  1574  
  1575  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1576  		starting <- struct{}{}
  1577  		<-progress
  1578  	}
  1579  	// Retrieve the sync progress and ensure all reported values are zero (pristine sync)
  1580  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != 0 {
  1581  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, 0)
  1582  	}
  1583  	// Attempt a full sync with a faulty peer
  1584  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1585  	missing := targetBlocks / 2
  1586  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1587  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1588  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1589  
  1590  	pending := new(sync.WaitGroup)
  1591  	pending.Add(1)
  1592  
  1593  	go func() {
  1594  		defer pending.Done()
  1595  		if err := tester.sync("faulty", nil, mode); err == nil {
  1596  			panic("succeeded faulty synchronisation")
  1597  		}
  1598  	}()
  1599  	<-starting
  1600  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != uint64(targetBlocks) {
  1601  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, targetBlocks)
  1602  	}
  1603  	progress <- struct{}{}
  1604  	pending.Wait()
  1605  
  1606  	// Synchronise with a good peer and check that the progress origin remains the same after the failure
  1607  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1608  	pending.Add(1)
  1609  
  1610  	go func() {
  1611  		defer pending.Done()
  1612  		if err := tester.sync("valid", nil, mode); err != nil {
  1613  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1614  		}
  1615  	}()
  1616  	<-starting
  1617  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current > uint64(targetBlocks/2) || height != uint64(targetBlocks) {
  1618  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", start, current, height, 0, targetBlocks/2, targetBlocks)
  1619  	}
  1620  	progress <- struct{}{}
  1621  	pending.Wait()
  1622  
  1623  	// Check final progress after successful sync
  1624  	if start, current, height, _, _ := tester.downloader.Progress(); start > uint64(targetBlocks/2) || current != uint64(targetBlocks) || height != uint64(targetBlocks) {
  1625  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", start, current, height, targetBlocks/2, targetBlocks, targetBlocks)
  1626  	}
  1627  }
  1628  
  1629  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1630  // the progress height is successfully reduced at the next sync invocation.
  1631  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1632  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1633  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1634  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1635  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1636  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1637  
  1638  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1639  	t.Parallel()
  1640  
  1641  	tester := newTester()
  1642  	defer tester.terminate()
  1643  
  1644  	// Create a small block chain
  1645  	targetBlocks := blockCacheItems - 15
  1646  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
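        	// The top three blocks form the attacker's faked extension: the head is still
        	// announced, but the two blocks beneath it are withheld below, so the promised
        	// height can never actually be reached.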
  1647  
  1648  	// Set a sync init hook to catch progress changes
  1649  	starting := make(chan struct{})
  1650  	progress := make(chan struct{})
  1651  
  1652  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1653  		starting <- struct{}{}
  1654  		<-progress
  1655  	}
  1656  	// Retrieve the sync progress and ensure all reported values are zero (pristine sync)
  1657  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != 0 {
  1658  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, 0)
  1659  	}
  1660  	// Create and sync with an attacker that promises a higher chain than available
  1661  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1662  	for i := 1; i < 3; i++ {
  1663  		delete(tester.peerHeaders["attack"], hashes[i])
  1664  		delete(tester.peerBlocks["attack"], hashes[i])
  1665  		delete(tester.peerReceipts["attack"], hashes[i])
  1666  	}
  1667  
  1668  	pending := new(sync.WaitGroup)
  1669  	pending.Add(1)
  1670  
  1671  	go func() {
  1672  		defer pending.Done()
  1673  		if err := tester.sync("attack", nil, mode); err == nil {
  1674  			panic("succeeded attacker synchronisation")
  1675  		}
  1676  	}()
  1677  	<-starting
  1678  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current != 0 || height != uint64(targetBlocks+3) {
  1679  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", start, current, height, 0, 0, targetBlocks+3)
  1680  	}
  1681  	progress <- struct{}{}
  1682  	pending.Wait()
  1683  
  1684  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1685  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1686  	pending.Add(1)
  1687  
  1688  	go func() {
  1689  		defer pending.Done()
  1690  		if err := tester.sync("valid", nil, mode); err != nil {
  1691  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1692  		}
  1693  	}()
  1694  	<-starting
  1695  	if start, current, height, _, _ := tester.downloader.Progress(); start != 0 || current > uint64(targetBlocks) || height != uint64(targetBlocks) {
  1696  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", start, current, height, 0, targetBlocks, targetBlocks)
  1697  	}
  1698  	progress <- struct{}{}
  1699  	pending.Wait()
  1700  
  1701  	// Check final progress after successful sync
  1702  	if start, current, height, _, _ := tester.downloader.Progress(); start > uint64(targetBlocks) || current != uint64(targetBlocks) || height != uint64(targetBlocks) {
  1703  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", start, current, height, targetBlocks, targetBlocks, targetBlocks)
  1704  	}
  1705  }
  1706  
  1707  // This test reproduces an issue where unexpected deliveries would
  1708  // block indefinitely if they arrived at the right time.
  1709  // Data-driven subtests are used so that the cases run in parallel with each other,
  1710  // but not with the other tests, avoiding intermittent failures.
  1711  func TestDeliverHeadersHang(t *testing.T) {
  1712  	testCases := []struct {
  1713  		protocol int
  1714  		syncMode SyncMode
  1715  	}{
  1716  		{62, FullSync},
  1717  		{63, FullSync},
  1718  		{63, FastSync},
  1719  		{64, FullSync},
  1720  		{64, FastSync},
  1721  		{64, LightSync},
  1722  	}
  1723  	for _, tc := range testCases {
  1724  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1725  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1726  		})
  1727  	}
  1728  }
  1729  
  1730  type floodingTestPeer struct {
  1731  	peer   peer
  1732  	tester *downloadTester
  1733  }
  1734  
  1735  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.currentHead() }
  1736  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1737  	// Forward directly to the wrapped peer's relative header fetcher.
  1738  	return ftp.peer.getRelHeaders(hash, count, skip, reverse)
  1739  }
  1740  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1741  	return ftp.peer.getBlockBodies(hashes)
  1742  }
  1743  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1744  	return ftp.peer.getReceipts(hashes)
  1745  }
  1746  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1747  	return ftp.peer.getNodeData(hashes)
  1748  }
  1749  
  1750  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1751  	deliveriesDone := make(chan struct{}, 500)
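        	// Fire off a burst of bogus deliveries from unknown peers; each must return
        	// promptly rather than block, which is the regression this test guards against.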
  1752  	for i := 0; i < cap(deliveriesDone); i++ {
  1753  		peer := fmt.Sprintf("fake-peer%d", i)
  1754  		go func() {
  1755  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1756  			deliveriesDone <- struct{}{}
  1757  		}()
  1758  	}
  1759  	// Deliver the actual requested headers.
  1760  	go ftp.peer.getAbsHeaders(from, count, skip, reverse)
  1761  	// None of the extra deliveries should block.
  1762  	timeout := time.After(60 * time.Second)
  1763  	for i := 0; i < cap(deliveriesDone); i++ {
  1764  		select {
  1765  		case <-deliveriesDone:
  1766  		case <-timeout:
  1767  			panic("blocked")
  1768  		}
  1769  	}
  1770  	return nil
  1771  }
  1772  
  1773  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1774  	t.Parallel()
  1775  
  1776  	master := newTester()
  1777  	defer master.terminate()
  1778  
  1779  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1780  	for i := 0; i < 200; i++ {
  1781  
  1782  		tester := newTester()
  1783  		tester.peerDb = master.peerDb
  1784  
  1785  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1786  		tester.downloader.peers.lock.Lock()
  1787  		// Whenever the downloader requests headers, flood it with a lot of
  1788  		// unrequested header deliveries. The wrapper keeps a copy of the original
        		// peer, so rewiring the live peer's absolute header fetcher through the
        		// wrapper (rather than reinstalling the unwrapped copy) cannot recurse.
  1789  		ftp := &floodingTestPeer{
  1790  			*tester.downloader.peers.peers["peer"],
  1791  			tester,
  1792  		}
  1793  		tester.downloader.peers.peers["peer"].getAbsHeaders = ftp.RequestHeadersByNumber
  1794  		tester.downloader.peers.lock.Unlock()
  1795  		if err := tester.sync("peer", nil, mode); err != nil {
  1796  			t.Errorf("sync failed: %v", err)
  1797  		}
  1798  		tester.terminate()
  1799  
  1800  		// Flush all goroutines so they don't interfere with subsequent tests
  1802  	}
  1803  }