github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/eth/downloader/downloader_test.go

     1  // This file is part of the go-sberex library. The go-sberex library is 
     2  // free software: you can redistribute it and/or modify it under the terms 
     3  // of the GNU Lesser General Public License as published by the Free 
     4  // Software Foundation, either version 3 of the License, or (at your option)
     5  // any later version.
     6  //
     7  // The go-sberex library is distributed in the hope that it will be useful, 
     8  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     9  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 
    10  // General Public License <http://www.gnu.org/licenses/> for more details.
    11  
    12  package downloader
    13  
    14  import (
    15  	"errors"
    16  	"fmt"
    17  	"math/big"
    18  	"sync"
    19  	"sync/atomic"
    20  	"testing"
    21  	"time"
    22  
    23  	"github.com/Sberex/go-sberex/common"
    24  	"github.com/Sberex/go-sberex/consensus/ethash"
    25  	"github.com/Sberex/go-sberex/core"
    26  	"github.com/Sberex/go-sberex/core/types"
    27  	"github.com/Sberex/go-sberex/crypto"
    28  	"github.com/Sberex/go-sberex/ethdb"
    29  	"github.com/Sberex/go-sberex/event"
    30  	"github.com/Sberex/go-sberex/params"
    31  	"github.com/Sberex/go-sberex/trie"
    32  )
    33  
    34  var (
    35  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    36  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    37  )
    38  
    39  // Reduce some of the parameters to make the tester faster.
    40  func init() {
    41  	MaxForkAncestry = uint64(10000)
    42  	blockCacheItems = 1024
    43  	fsHeaderContCheck = 500 * time.Millisecond
    44  }
    45  
    46  // downloadTester is a test simulator for mocking out a local block chain.
    47  type downloadTester struct {
    48  	downloader *Downloader
    49  
    50  	genesis *types.Block   // Genesis block used by the tester and peers
    51  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    52  	peerDb  ethdb.Database // Database of the peers containing all data
    53  
    54  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    55  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    56  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    57  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    58  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    59  
    60  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    61  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    62  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    63  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    64  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    65  
    66  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    67  
    68  	lock sync.RWMutex
    69  }
    70  
    71  // newTester creates a new downloader test mocker.
    72  func newTester() *downloadTester {
    73  	testdb, _ := ethdb.NewMemDatabase()
    74  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    75  
    76  	tester := &downloadTester{
    77  		genesis:           genesis,
    78  		peerDb:            testdb,
    79  		ownHashes:         []common.Hash{genesis.Hash()},
    80  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    81  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    82  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    83  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    84  		peerHashes:        make(map[string][]common.Hash),
    85  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    86  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    87  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    88  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    89  		peerMissingStates: make(map[string]map[common.Hash]bool),
    90  	}
    91  	tester.stateDb, _ = ethdb.NewMemDatabase()
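        	// Fake out state availability: a root is marked as "present" simply by
        	// storing a placeholder byte under it. InsertChain does the same for
        	// imported blocks and CurrentBlock checks for it (a tester convention,
        	// not real state data).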
    92  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    93  
    94  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
    95  
    96  	return tester
    97  }
    98  
    99  // makeChain creates a chain of n blocks starting at and including parent.
   100  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   101  // contains a transaction and every 5th an uncle to allow testing correct block
   102  // reassembly.
   103  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   104  	// Generate the block chain
   105  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
   106  		block.SetCoinbase(common.Address{seed})
   107  
   108  		// If a heavy chain is requested, shorten the block times to raise the difficulty
   109  		if heavy {
   110  			block.OffsetTime(-1)
   111  		}
   112  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   113  		if parent == dl.genesis && i%3 == 0 {
   114  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   115  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   116  			if err != nil {
   117  				panic(err)
   118  			}
   119  			block.AddTx(tx)
   120  		}
   121  		// If the block number is a multiple of 5, add a bonus uncle to the block
   122  		if i > 0 && i%5 == 0 {
   123  			block.AddUncle(&types.Header{
   124  				ParentHash: block.PrevBlock(i - 1).Hash(),
   125  				Number:     big.NewInt(block.Number().Int64() - 1),
   126  			})
   127  		}
   128  	})
   129  	// Convert the block-chain into a hash-chain and header/block maps
   130  	hashes := make([]common.Hash, n+1)
   131  	hashes[len(hashes)-1] = parent.Hash()
   132  
   133  	headerm := make(map[common.Hash]*types.Header, n+1)
   134  	headerm[parent.Hash()] = parent.Header()
   135  
   136  	blockm := make(map[common.Hash]*types.Block, n+1)
   137  	blockm[parent.Hash()] = parent
   138  
   139  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   140  	receiptm[parent.Hash()] = parentReceipts
   141  
   142  	for i, b := range blocks {
   143  		hashes[len(hashes)-i-2] = b.Hash()
   144  		headerm[b.Hash()] = b.Header()
   145  		blockm[b.Hash()] = b
   146  		receiptm[b.Hash()] = receipts[i]
   147  	}
   148  	return hashes, headerm, blockm, receiptm
   149  }
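
        // Usage sketch (illustrative, mirroring the calls in the tests below): a
        // chain of targetBlocks blocks rooted at the genesis, with coinbase seed 0
        // and no artificial difficulty bump:
        //
        //	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
        //	// hashes[0] is the head; hashes[len(hashes)-1] == tester.genesis.Hash()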
   150  
   151  // makeChainFork creates two chains of length n, such that h1[:f] and
   152  // h2[:f] are different but have a common suffix of length n-f.
   153  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   154  	// Create the common suffix
   155  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   156  
   157  	// Create the forks, making the second heavier if non-balanced forks were requested
   158  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   159  	hashes1 = append(hashes1, hashes[1:]...)
   160  
   161  	heavy := !balanced
   165  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   166  	hashes2 = append(hashes2, hashes[1:]...)
   167  
   168  	for hash, header := range headers {
   169  		headers1[hash] = header
   170  		headers2[hash] = header
   171  	}
   172  	for hash, block := range blocks {
   173  		blocks1[hash] = block
   174  		blocks2[hash] = block
   175  	}
   176  	for hash, receipt := range receipts {
   177  		receipts1[hash] = receipt
   178  		receipts2[hash] = receipt
   179  	}
   180  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   181  }
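
        // Resulting layout (illustrative), both chains ordered head first:
        //
        //	hashes1: forkA[f-1] .. forkA[0] -> common[n-f-1] .. common[0] -> parent
        //	hashes2: forkB[f-1] .. forkB[0] -> common[n-f-1] .. common[0] -> parent
        //
        // The two forks differ by coinbase seed (1 vs 2) and, when balanced is
        // false, fork B is generated heavier than fork A.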
   182  
   183  // terminate aborts any operations on the embedded downloader and releases all
   184  // held resources.
   185  func (dl *downloadTester) terminate() {
   186  	dl.downloader.Terminate()
   187  }
   188  
   189  // sync starts synchronizing with a remote peer, blocking until it completes.
   190  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   191  	dl.lock.RLock()
   192  	hash := dl.peerHashes[id][0]
   193  	// If no particular TD was requested, load from the peer's blockchain
   194  	if td == nil {
   195  		td = big.NewInt(1)
   196  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   197  			td = diff
   198  		}
   199  	}
   200  	dl.lock.RUnlock()
   201  
   202  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   203  	err := dl.downloader.synchronise(id, hash, td, mode)
   204  	select {
   205  	case <-dl.downloader.cancelCh:
   206  		// Ok, downloader fully cancelled after sync cycle
   207  	default:
   208  		// Downloader is still accepting packets, can block a peer up
   209  		panic("downloader active post sync cycle") // panic will be caught by tester
   210  	}
   211  	return err
   212  }
   213  
   214  // HasHeader checks if a header is present in the tester's canonical chain.
   215  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   216  	return dl.GetHeaderByHash(hash) != nil
   217  }
   218  
   219  // HasBlock checks if a block is present in the tester's canonical chain.
   220  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   221  	return dl.GetBlockByHash(hash) != nil
   222  }
   223  
   224  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   225  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   226  	dl.lock.RLock()
   227  	defer dl.lock.RUnlock()
   228  
   229  	return dl.ownHeaders[hash]
   230  }
   231  
   232  // GetBlockByHash retrieves a block from the tester's canonical chain.
   233  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   234  	dl.lock.RLock()
   235  	defer dl.lock.RUnlock()
   236  
   237  	return dl.ownBlocks[hash]
   238  }
   239  
   240  // CurrentHeader retrieves the current head header from the canonical chain.
   241  func (dl *downloadTester) CurrentHeader() *types.Header {
   242  	dl.lock.RLock()
   243  	defer dl.lock.RUnlock()
   244  
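        	// Walk the hash chain backwards and return the most recent hash that
        	// still has a header, since a rollback may have deleted newer entries.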
   245  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   246  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   247  			return header
   248  		}
   249  	}
   250  	return dl.genesis.Header()
   251  }
   252  
   253  // CurrentBlock retrieves the current head block from the canonical chain.
   254  func (dl *downloadTester) CurrentBlock() *types.Block {
   255  	dl.lock.RLock()
   256  	defer dl.lock.RUnlock()
   257  
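        	// Like CurrentHeader, but the head block must also have its state
        	// present locally; blocks imported via InsertReceiptChain carry no
        	// state in this tester, so the search skips past them.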
   258  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   259  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   260  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   261  				return block
   262  			}
   263  		}
   264  	}
   265  	return dl.genesis
   266  }
   267  
   268  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   269  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   270  	dl.lock.RLock()
   271  	defer dl.lock.RUnlock()
   272  
   273  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   274  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   275  			return block
   276  		}
   277  	}
   278  	return dl.genesis
   279  }
   280  
   281  // FastSyncCommitHead manually sets the head block to a given hash.
   282  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   283  	// For now only check that the state trie is correct
   284  	if block := dl.GetBlockByHash(hash); block != nil {
   285  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
   286  		return err
   287  	}
   288  	return fmt.Errorf("non existent block: %x", hash[:4])
   289  }
   290  
   291  // GetTd retrieves the block's total difficulty from the canonical chain.
   292  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   293  	dl.lock.RLock()
   294  	defer dl.lock.RUnlock()
   295  
   296  	return dl.ownChainTd[hash]
   297  }
   298  
   299  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   300  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   301  	dl.lock.Lock()
   302  	defer dl.lock.Unlock()
   303  
   304  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   305  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   306  		return 0, errors.New("unknown parent")
   307  	}
   308  	for i := 1; i < len(headers); i++ {
   309  		if headers[i].ParentHash != headers[i-1].Hash() {
   310  			return i, errors.New("unknown parent")
   311  		}
   312  	}
   313  	// Do a full insert if pre-checks passed
   314  	for i, header := range headers {
   315  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   316  			continue
   317  		}
   318  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   319  			return i, errors.New("unknown parent")
   320  		}
   321  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   322  		dl.ownHeaders[header.Hash()] = header
   323  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   324  	}
   325  	return len(headers), nil
   326  }
   327  
   328  // InsertChain injects a new batch of blocks into the simulated chain.
   329  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   330  	dl.lock.Lock()
   331  	defer dl.lock.Unlock()
   332  
   333  	for i, block := range blocks {
   334  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   335  			return i, errors.New("unknown parent")
   336  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   337  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   338  		}
   339  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   340  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   341  			dl.ownHeaders[block.Hash()] = block.Header()
   342  		}
   343  		dl.ownBlocks[block.Hash()] = block
   344  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   345  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   346  	}
   347  	return len(blocks), nil
   348  }
   349  
   350  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   351  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   352  	dl.lock.Lock()
   353  	defer dl.lock.Unlock()
   354  
   355  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   356  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   357  			return i, errors.New("unknown owner")
   358  		}
   359  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   360  			return i, errors.New("unknown parent")
   361  		}
   362  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   363  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   364  	}
   365  	return len(blocks), nil
   366  }
   367  
   368  // Rollback removes some recently added elements from the chain.
   369  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   370  	dl.lock.Lock()
   371  	defer dl.lock.Unlock()
   372  
   373  	for i := len(hashes) - 1; i >= 0; i-- {
   374  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   375  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   376  		}
   377  		delete(dl.ownChainTd, hashes[i])
   378  		delete(dl.ownHeaders, hashes[i])
   379  		delete(dl.ownReceipts, hashes[i])
   380  		delete(dl.ownBlocks, hashes[i])
   381  	}
   382  }
   383  
   384  // newPeer registers a new block download source into the downloader.
   385  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   386  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   387  }
   388  
   389  // newSlowPeer registers a new block download source into the downloader, with a
   390  // specific delay time on processing the network packets sent to it, simulating
   391  // potentially slow network IO.
   392  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   393  	dl.lock.Lock()
   394  	defer dl.lock.Unlock()
   395  
   396  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   397  	if err == nil {
   398  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   399  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   400  		copy(dl.peerHashes[id], hashes)
   401  
   402  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   403  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   404  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   405  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   406  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   407  
   408  		genesis := hashes[len(hashes)-1]
   409  		if header := headers[genesis]; header != nil {
   410  			dl.peerHeaders[id][genesis] = header
   411  			dl.peerChainTds[id][genesis] = header.Difficulty
   412  		}
   413  		if block := blocks[genesis]; block != nil {
   414  			dl.peerBlocks[id][genesis] = block
   415  			dl.peerChainTds[id][genesis] = block.Difficulty()
   416  		}
   417  
   418  		for i := len(hashes) - 2; i >= 0; i-- {
   419  			hash := hashes[i]
   420  
   421  			if header, ok := headers[hash]; ok {
   422  				dl.peerHeaders[id][hash] = header
   423  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   424  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   425  				}
   426  			}
   427  			if block, ok := blocks[hash]; ok {
   428  				dl.peerBlocks[id][hash] = block
   429  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   430  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   431  				}
   432  			}
   433  			if receipt, ok := receipts[hash]; ok {
   434  				dl.peerReceipts[id][hash] = receipt
   435  			}
   436  		}
   437  	}
   438  	return err
   439  }
   440  
   441  // dropPeer simulates a hard peer removal from the connection pool.
   442  func (dl *downloadTester) dropPeer(id string) {
   443  	dl.lock.Lock()
   444  	defer dl.lock.Unlock()
   445  
   446  	delete(dl.peerHashes, id)
   447  	delete(dl.peerHeaders, id)
   448  	delete(dl.peerBlocks, id)
   449  	delete(dl.peerChainTds, id)
   450  
   451  	dl.downloader.UnregisterPeer(id)
   452  }
   453  
   454  type downloadTesterPeer struct {
   455  	dl    *downloadTester
   456  	id    string
   457  	delay time.Duration
   458  	lock  sync.RWMutex
   459  }
   460  
   461  // setDelay is a thread-safe setter for the network delay value.
   462  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   463  	dlp.lock.Lock()
   464  	defer dlp.lock.Unlock()
   465  
   466  	dlp.delay = delay
   467  }
   468  
   469  // waitDelay is a thread-safe way to sleep for the configured time.
   470  func (dlp *downloadTesterPeer) waitDelay() {
   471  	dlp.lock.RLock()
   472  	delay := dlp.delay
   473  	dlp.lock.RUnlock()
   474  
   475  	time.Sleep(delay)
   476  }
   477  
   478  // Head retrieves the test peer's current head hash and total difficulty.
   480  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   481  	dlp.dl.lock.RLock()
   482  	defer dlp.dl.lock.RUnlock()
   483  
   484  	return dlp.dl.peerHashes[dlp.id][0], nil
   485  }
   486  
   487  // RequestHeadersByHash retrieves a batch of headers from the test peer,
   488  // starting at a hashed origin. The request is translated to a numbered origin
   489  // and served by RequestHeadersByNumber.
   490  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   491  	// Find the canonical number of the hash
   492  	dlp.dl.lock.RLock()
   493  	number := uint64(0)
   494  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   495  		if hash == origin {
   496  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   497  			break
   498  		}
   499  	}
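        	// Note: an origin hash that isn't known leaves number at 0, i.e. the
        	// request falls back to the genesis.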
   500  	dlp.dl.lock.RUnlock()
   501  
   502  	// Use the absolute header fetcher to satisfy the query
   503  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   504  }
   505  
   506  // RequestHeadersByNumber retrieves a batch of headers from the test peer,
   507  // starting at a numbered origin, and delivers them asynchronously to the
   508  // downloader.
   509  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   510  	dlp.waitDelay()
   511  
   512  	dlp.dl.lock.RLock()
   513  	defer dlp.dl.lock.RUnlock()
   514  
   515  	// Gather the next batch of headers
   516  	hashes := dlp.dl.peerHashes[dlp.id]
   517  	headers := dlp.dl.peerHeaders[dlp.id]
   518  	result := make([]*types.Header, 0, amount)
   519  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   520  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   521  			result = append(result, header)
   522  		}
   523  	}
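        	// Worked example of the index arithmetic above (illustrative): with
        	// len(hashes) == 11, origin == 2, amount == 3 and skip == 1, the loop
        	// visits indices 8, 6 and 4, i.e. block numbers 2, 4 and 6, since the
        	// hash chain is ordered head -> genesis and number N sits at index
        	// len(hashes)-1-N.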
   524  	// Delay delivery a bit to allow attacks to unfold
   525  	go func() {
   526  		time.Sleep(time.Millisecond)
   527  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   528  	}()
   529  	return nil
   530  }
   531  
   532  // RequestBodies retrieves a batch of block bodies from the test peer and
   533  // delivers them asynchronously to the downloader.
   535  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   536  	dlp.waitDelay()
   537  
   538  	dlp.dl.lock.RLock()
   539  	defer dlp.dl.lock.RUnlock()
   540  
   541  	blocks := dlp.dl.peerBlocks[dlp.id]
   542  
   543  	transactions := make([][]*types.Transaction, 0, len(hashes))
   544  	uncles := make([][]*types.Header, 0, len(hashes))
   545  
   546  	for _, hash := range hashes {
   547  		if block, ok := blocks[hash]; ok {
   548  			transactions = append(transactions, block.Transactions())
   549  			uncles = append(uncles, block.Uncles())
   550  		}
   551  	}
   552  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   553  
   554  	return nil
   555  }
   556  
   557  // RequestReceipts retrieves a batch of block receipts from the test peer and
   558  // delivers them asynchronously to the downloader.
   560  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   561  	dlp.waitDelay()
   562  
   563  	dlp.dl.lock.RLock()
   564  	defer dlp.dl.lock.RUnlock()
   565  
   566  	receipts := dlp.dl.peerReceipts[dlp.id]
   567  
   568  	results := make([][]*types.Receipt, 0, len(hashes))
   569  	for _, hash := range hashes {
   570  		if receipt, ok := receipts[hash]; ok {
   571  			results = append(results, receipt)
   572  		}
   573  	}
   574  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   575  
   576  	return nil
   577  }
   578  
   579  // RequestNodeData retrieves a batch of state trie nodes from the test peer and
   580  // delivers them asynchronously to the downloader.
   582  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   583  	dlp.waitDelay()
   584  
   585  	dlp.dl.lock.RLock()
   586  	defer dlp.dl.lock.RUnlock()
   587  
   588  	results := make([][]byte, 0, len(hashes))
   589  	for _, hash := range hashes {
   590  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   591  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   592  				results = append(results, data)
   593  			}
   594  		}
   595  	}
   596  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   597  
   598  	return nil
   599  }
   600  
   601  // assertOwnChain checks if the local chain contains the correct number of items
   602  // of the various chain components.
   603  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   604  	assertOwnForkedChain(t, tester, 1, []int{length})
   605  }
   606  
   607  // assertOwnForkedChain checks if the local forked chain contains the correct
   608  // number of items of the various chain components.
   609  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   610  	// Initialize the counters for the first fork
   611  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
   612  
   613  	if receipts < 0 {
   614  		receipts = 1
   615  	}
   616  	// Update the counters for each subsequent fork
   617  	for _, length := range lengths[1:] {
   618  		headers += length - common
   619  		blocks += length - common
   620  		receipts += length - common - fsMinFullBlocks
   621  	}
   622  	switch tester.downloader.mode {
   623  	case FullSync:
   624  		receipts = 1
   625  	case LightSync:
   626  		blocks, receipts = 1, 1
   627  	}
   628  	if hs := len(tester.ownHeaders); hs != headers {
   629  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   630  	}
   631  	if bs := len(tester.ownBlocks); bs != blocks {
   632  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   633  	}
   634  	if rs := len(tester.ownReceipts); rs != receipts {
   635  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   636  	}
   637  	// Verify the state trie too for fast syncs
   638  	/*if tester.downloader.mode == FastSync {
   639  		pivot := uint64(0)
   640  		var index int
   641  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   642  			index = pivot
   643  		} else {
   644  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   645  		}
   646  		if index > 0 {
   647  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   648  				t.Fatalf("state reconstruction failed: %v", err)
   649  			}
   650  		}
   651  	}*/
   652  }
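
        // Worked example (illustrative): testForkedSync ends with
        // assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1}).
        // The first fork accounts for common+fork+1 headers and the second adds
        // (common+fork+1)-(common+1) = fork more, so a full sync is expected to
        // hold common+2*fork+1 headers and as many blocks.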
   653  
   654  // Tests that simple synchronization against a canonical chain works correctly.
   655  // In this test common ancestor lookup should be short-circuited and not require
   656  // binary searching.
   657  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   658  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   659  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   660  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   661  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   662  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   663  
   664  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   665  	t.Parallel()
   666  
   667  	tester := newTester()
   668  	defer tester.terminate()
   669  
   670  	// Create a small enough block chain to download
   671  	targetBlocks := blockCacheItems - 15
   672  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   673  
   674  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   675  
   676  	// Synchronise with the peer and make sure all relevant data was retrieved
   677  	if err := tester.sync("peer", nil, mode); err != nil {
   678  		t.Fatalf("failed to synchronise blocks: %v", err)
   679  	}
   680  	assertOwnChain(t, tester, targetBlocks+1)
   681  }
   682  
   683  // Tests that if a large batch of blocks is being downloaded, it is throttled
   684  // until the cached blocks are retrieved.
   685  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   686  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   687  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   688  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   689  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   690  
   691  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   692  	t.Parallel()
   693  	tester := newTester()
   694  	defer tester.terminate()
   695  
   696  	// Create a long block chain to download
   697  	targetBlocks := 8 * blockCacheItems
   698  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   699  
   700  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   701  
   702  	// Wrap the importer to allow stepping
   703  	blocked, proceed := uint32(0), make(chan struct{})
   704  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   705  		atomic.StoreUint32(&blocked, uint32(len(results)))
   706  		<-proceed
   707  	}
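        	// The hook parks the import goroutine on the proceed channel, letting
        	// the loop below observe the cache in a fully throttled state before
        	// releasing each batch.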
   708  	// Start a synchronisation concurrently
   709  	errc := make(chan error)
   710  	go func() {
   711  		errc <- tester.sync("peer", nil, mode)
   712  	}()
   713  	// Iteratively take some blocks, always checking the retrieval count
   714  	for {
   715  		// Check the retrieval count synchronously (the reason for this ugly block)
   716  		tester.lock.RLock()
   717  		retrieved := len(tester.ownBlocks)
   718  		tester.lock.RUnlock()
   719  		if retrieved >= targetBlocks+1 {
   720  			break
   721  		}
   722  		// Wait a bit for sync to throttle itself
   723  		var cached, frozen int
   724  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   725  			time.Sleep(25 * time.Millisecond)
   726  
   727  			tester.lock.Lock()
   728  			tester.downloader.queue.lock.Lock()
   729  			cached = len(tester.downloader.queue.blockDonePool)
   730  			if mode == FastSync {
   731  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   732  					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   733  					cached = receipts
   734  					//}
   735  				}
   736  			}
   737  			frozen = int(atomic.LoadUint32(&blocked))
   738  			retrieved = len(tester.ownBlocks)
   739  			tester.downloader.queue.lock.Unlock()
   740  			tester.lock.Unlock()
   741  
   742  			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
   743  				break
   744  			}
   745  		}
   746  		// Make sure we filled up the cache, then exhaust it
   747  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   748  
   749  		tester.lock.RLock()
   750  		retrieved = len(tester.ownBlocks)
   751  		tester.lock.RUnlock()
   752  		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
   753  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   754  		}
   755  		// Permit the blocked blocks to import
   756  		if atomic.LoadUint32(&blocked) > 0 {
   757  			atomic.StoreUint32(&blocked, uint32(0))
   758  			proceed <- struct{}{}
   759  		}
   760  	}
   761  	// Check that we haven't pulled more blocks than available
   762  	assertOwnChain(t, tester, targetBlocks+1)
   763  	if err := <-errc; err != nil {
   764  		t.Fatalf("block synchronization failed: %v", err)
   765  	}
   766  }
   767  
   768  // Tests that simple synchronization against a forked chain works correctly. In
   769  // this test common ancestor lookup should *not* be short-circuited, and a full
   770  // binary search should be executed.
   771  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   772  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   773  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   774  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   775  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   776  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   777  
   778  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   779  	t.Parallel()
   780  
   781  	tester := newTester()
   782  	defer tester.terminate()
   783  
   784  	// Create a long enough forked chain
   785  	common, fork := MaxHashFetch, 2*MaxHashFetch
   786  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   787  
   788  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   789  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   790  
   791  	// Synchronise with the peer and make sure all blocks were retrieved
   792  	if err := tester.sync("fork A", nil, mode); err != nil {
   793  		t.Fatalf("failed to synchronise blocks: %v", err)
   794  	}
   795  	assertOwnChain(t, tester, common+fork+1)
   796  
   797  	// Synchronise with the second peer and make sure that fork is pulled too
   798  	if err := tester.sync("fork B", nil, mode); err != nil {
   799  		t.Fatalf("failed to synchronise blocks: %v", err)
   800  	}
   801  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   802  }
   803  
   804  // Tests that synchronising against a much shorter but much heavier fork works
   805  // correctly and is not dropped.
   806  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   807  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   808  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   809  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   810  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   811  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   812  
   813  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   814  	t.Parallel()
   815  
   816  	tester := newTester()
   817  	defer tester.terminate()
   818  
   819  	// Create a long enough forked chain
   820  	common, fork := MaxHashFetch, 4*MaxHashFetch
   821  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   822  
   823  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   824  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   825  
   826  	// Synchronise with the peer and make sure all blocks were retrieved
   827  	if err := tester.sync("light", nil, mode); err != nil {
   828  		t.Fatalf("failed to synchronise blocks: %v", err)
   829  	}
   830  	assertOwnChain(t, tester, common+fork+1)
   831  
   832  	// Synchronise with the second peer and make sure that fork is pulled too
   833  	if err := tester.sync("heavy", nil, mode); err != nil {
   834  		t.Fatalf("failed to synchronise blocks: %v", err)
   835  	}
   836  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   837  }
   838  
   839  // Tests that chain forks are contained within a certain interval of the current
   840  // chain head, ensuring that malicious peers cannot waste resources by feeding
   841  // long dead chains.
   842  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   843  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   844  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   845  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   846  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   847  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   848  
   849  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   850  	t.Parallel()
   851  
   852  	tester := newTester()
   853  	defer tester.terminate()
   854  
   855  	// Create a long enough forked chain
   856  	common, fork := 13, int(MaxForkAncestry+17)
   857  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   858  
   859  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   860  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   861  
   862  	// Synchronise with the peer and make sure all blocks were retrieved
   863  	if err := tester.sync("original", nil, mode); err != nil {
   864  		t.Fatalf("failed to synchronise blocks: %v", err)
   865  	}
   866  	assertOwnChain(t, tester, common+fork+1)
   867  
   868  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   869  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   870  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   871  	}
   872  }
   873  
   874  // Tests that chain forks are contained within a certain interval of the current
   875  // chain head for short but heavy forks too. These are a bit special because they
   876  // take different ancestor lookup paths.
   877  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   878  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   879  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   880  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   881  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   882  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   883  
   884  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   885  	t.Parallel()
   886  
   887  	tester := newTester()
   888  	defer tester.terminate()
   889  
   890  	// Create a long enough forked chain
   891  	common, fork := 13, int(MaxForkAncestry+17)
   892  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   893  
   894  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   895  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   896  
   897  	// Synchronise with the peer and make sure all blocks were retrieved
   898  	if err := tester.sync("original", nil, mode); err != nil {
   899  		t.Fatalf("failed to synchronise blocks: %v", err)
   900  	}
   901  	assertOwnChain(t, tester, common+fork+1)
   902  
   903  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   904  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   905  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   906  	}
   907  }
   908  
   909  // Tests that an inactive downloader will not accept incoming block headers and
   910  // bodies.
   911  func TestInactiveDownloader62(t *testing.T) {
   912  	t.Parallel()
   913  
   914  	tester := newTester()
   915  	defer tester.terminate()
   916  
   917  	// Check that neither block headers nor bodies are accepted
   918  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   919  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   920  	}
   921  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   922  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   923  	}
   924  }
   925  
   926  // Tests that an inactive downloader will not accept incoming block headers,
   927  // bodies and receipts.
   928  func TestInactiveDownloader63(t *testing.T) {
   929  	t.Parallel()
   930  
   931  	tester := newTester()
   932  	defer tester.terminate()
   933  
   934  	// Check that neither block headers, bodies nor receipts are accepted
   935  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   936  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   937  	}
   938  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   939  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   940  	}
   941  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   942  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   943  	}
   944  }
   945  
   946  // Tests that a canceled download wipes all previously accumulated state.
   947  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   948  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   949  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   950  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   951  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   952  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   953  
   954  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   955  	t.Parallel()
   956  
   957  	tester := newTester()
   958  	defer tester.terminate()
   959  
   960  	// Create a small enough block chain to download
   961  	targetBlocks := blockCacheItems - 15
   962  	if targetBlocks >= MaxHashFetch {
   963  		targetBlocks = MaxHashFetch - 15
   964  	}
   965  	if targetBlocks >= MaxHeaderFetch {
   966  		targetBlocks = MaxHeaderFetch - 15
   967  	}
   968  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   969  
   970  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   971  
   972  	// Make sure canceling works with a pristine downloader
   973  	tester.downloader.Cancel()
   974  	if !tester.downloader.queue.Idle() {
   975  		t.Errorf("download queue not idle")
   976  	}
   977  	// Synchronise with the peer, but cancel afterwards
   978  	if err := tester.sync("peer", nil, mode); err != nil {
   979  		t.Fatalf("failed to synchronise blocks: %v", err)
   980  	}
   981  	tester.downloader.Cancel()
   982  	if !tester.downloader.queue.Idle() {
   983  		t.Errorf("download queue not idle")
   984  	}
   985  }
   986  
   987  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   988  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   989  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   990  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   991  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   992  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   993  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   994  
   995  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   996  	t.Parallel()
   997  
   998  	tester := newTester()
   999  	defer tester.terminate()
  1000  
  1001  	// Create various peers with various parts of the chain
  1002  	targetPeers := 8
  1003  	targetBlocks := targetPeers*blockCacheItems - 15
  1004  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1005  
  1006  	for i := 0; i < targetPeers; i++ {
  1007  		id := fmt.Sprintf("peer #%d", i)
  1008  		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
  1009  	}
  1010  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1011  		t.Fatalf("failed to synchronise blocks: %v", err)
  1012  	}
  1013  	assertOwnChain(t, tester, targetBlocks+1)
  1014  }
  1015  
  1016  // Tests that synchronisations behave well in multi-version protocol environments
  1017  // and do not wreak havoc on other nodes in the network.
  1018  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1019  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1020  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1021  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1022  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1023  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1024  
  1025  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1026  	t.Parallel()
  1027  
  1028  	tester := newTester()
  1029  	defer tester.terminate()
  1030  
  1031  	// Create a small enough block chain to download
  1032  	targetBlocks := blockCacheItems - 15
  1033  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1034  
  1035  	// Create peers of every type
  1036  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1037  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1038  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1039  
  1040  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1041  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1042  		t.Fatalf("failed to synchronise blocks: %v", err)
  1043  	}
  1044  	assertOwnChain(t, tester, targetBlocks+1)
  1045  
  1046  	// Check that no peers have been dropped off
  1047  	for _, version := range []int{62, 63, 64} {
  1048  		peer := fmt.Sprintf("peer %d", version)
  1049  		if _, ok := tester.peerHashes[peer]; !ok {
  1050  			t.Errorf("%s dropped", peer)
  1051  		}
  1052  	}
  1053  }
  1054  
  1055  // Tests that if a block is empty (e.g. header only), no body request should be
  1056  // made, and instead the header should be assembled into a whole block on its own.
  1057  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1058  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1059  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1060  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1061  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1062  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1063  
  1064  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1065  	t.Parallel()
  1066  
  1067  	tester := newTester()
  1068  	defer tester.terminate()
  1069  
  1070  	// Create a block chain to download
  1071  	targetBlocks := 2*blockCacheItems - 15
  1072  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1073  
  1074  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1075  
  1076  	// Instrument the downloader to signal body and receipt requests
  1077  	bodiesHave, receiptsHave := int32(0), int32(0)
  1078  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1079  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1080  	}
  1081  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1082  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1083  	}
  1084  	// Synchronise with the peer and make sure all blocks were retrieved
  1085  	if err := tester.sync("peer", nil, mode); err != nil {
  1086  		t.Fatalf("failed to synchronise blocks: %v", err)
  1087  	}
  1088  	assertOwnChain(t, tester, targetBlocks+1)
  1089  
  1090  	// Validate the number of block bodies that should have been requested
  1091  	bodiesNeeded, receiptsNeeded := 0, 0
  1092  	for _, block := range blocks {
  1093  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1094  			bodiesNeeded++
  1095  		}
  1096  	}
  1097  	for _, receipt := range receipts {
  1098  		if mode == FastSync && len(receipt) > 0 {
  1099  			receiptsNeeded++
  1100  		}
  1101  	}
  1102  	if int(bodiesHave) != bodiesNeeded {
  1103  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1104  	}
  1105  	if int(receiptsHave) != receiptsNeeded {
  1106  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1107  	}
  1108  }
  1109  
  1110  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1111  // stalling the downloader by feeding gapped header chains.
  1112  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1113  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1114  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1115  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1116  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1117  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1118  
  1119  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1120  	t.Parallel()
  1121  
  1122  	tester := newTester()
  1123  	defer tester.terminate()
  1124  
  1125  	// Create a small enough block chain to download
  1126  	targetBlocks := blockCacheItems - 15
  1127  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1128  
  1129  	// Attempt a full sync with an attacker feeding gapped headers
  1130  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1131  	missing := targetBlocks / 2
  1132  	delete(tester.peerHeaders["attack"], hashes[missing])
  1133  
  1134  	if err := tester.sync("attack", nil, mode); err == nil {
  1135  		t.Fatalf("succeeded attacker synchronisation")
  1136  	}
  1137  	// Synchronise with the valid peer and make sure sync succeeds
  1138  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1139  	if err := tester.sync("valid", nil, mode); err != nil {
  1140  		t.Fatalf("failed to synchronise blocks: %v", err)
  1141  	}
  1142  	assertOwnChain(t, tester, targetBlocks+1)
  1143  }
  1144  
  1145  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1146  // detects the invalid numbering.
  1147  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1148  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1149  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1150  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1151  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1152  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1153  
  1154  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1155  	t.Parallel()
  1156  
  1157  	tester := newTester()
  1158  	defer tester.terminate()
  1159  
  1160  	// Create a small enough block chain to download
  1161  	targetBlocks := blockCacheItems - 15
  1162  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1163  
  1164  	// Attempt a full sync with an attacker feeding shifted headers
  1165  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1166  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1167  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1168  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1169  
  1170  	if err := tester.sync("attack", nil, mode); err == nil {
  1171  		t.Fatalf("succeeded attacker synchronisation")
  1172  	}
  1173  	// Synchronise with the valid peer and make sure sync succeeds
  1174  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1175  	if err := tester.sync("valid", nil, mode); err != nil {
  1176  		t.Fatalf("failed to synchronise blocks: %v", err)
  1177  	}
  1178  	assertOwnChain(t, tester, targetBlocks+1)
  1179  }
  1180  
  1181  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1182  // for various failure scenarios. Afterwards a full sync is attempted to make
  1183  // sure no state was corrupted.
  1184  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1185  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1186  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1187  
  1188  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1189  	t.Parallel()
  1190  
  1191  	tester := newTester()
  1192  	defer tester.terminate()
  1193  
  1194  	// Create a small enough block chain to download
  1195  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1196  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1197  
  1198  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1199  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1200  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1201  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
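        	// The withheld header sits fsHeaderSafetyNet+MaxHeaderFetch blocks into the
        	// chain (hashes are newest-first), so after the fsHeaderSafetyNet rollback at
        	// most MaxHeaderFetch headers may remain, which the check below asserts.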
  1202  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1203  
  1204  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1205  		t.Fatalf("succeeded fast attacker synchronisation")
  1206  	}
  1207  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1208  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1209  	}
  1210  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1211  	// This should result in both the last fsHeaderSafetyNet headers being rolled
  1212  	// back, and the pivot point being reverted to a non-block status.
  1213  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1214  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1215  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1216  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1217  
  1218  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1219  		t.Fatalf("succeeded block attacker synchronisation")
  1220  	}
  1221  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1222  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1223  	}
  1224  	if mode == FastSync {
  1225  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1226  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1227  		}
  1228  	}
  1229  	// Attempt to sync with an attacker that withholds promised blocks after the
  1230  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1231  	// but already imported pivot block.
  1232  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1233  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1234  
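        	// Delay the withholding until the sync is already running, so the full chain
        	// height (and thus the pivot) has been advertised before the headers vanish.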
  1235  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1236  		for i := missing; i <= len(hashes); i++ {
  1237  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1238  		}
  1239  		tester.downloader.syncInitHook = nil
  1240  	}
  1241  
  1242  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1243  		t.Fatalf("succeeded withholding attacker synchronisation")
  1244  	}
  1245  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1246  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1247  	}
  1248  	if mode == FastSync {
  1249  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1250  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1251  		}
  1252  	}
  1253  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1254  	// rollback should also disable fast syncing for this process, verify that we
  1255  	// did a fresh full sync. Note that we can't assert anything about the receipts,
  1256  	// since we don't purge the database of them, hence we can't use assertOwnChain.
  1257  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1258  	if err := tester.sync("valid", nil, mode); err != nil {
  1259  		t.Fatalf("failed to synchronise blocks: %v", err)
  1260  	}
  1261  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1262  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1263  	}
  1264  	if mode != LightSync {
  1265  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1266  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1267  		}
  1268  	}
  1269  }
  1270  
  1271  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1272  // afterwards by not sending any useful hashes.
  1273  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1274  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1275  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1276  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1277  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1278  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1279  
  1280  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1281  	t.Parallel()
  1282  
  1283  	tester := newTester()
  1284  	defer tester.terminate()
  1285  
  1286  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1287  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
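        	// The attacker claims a huge total difficulty (passed to sync below) but only
        	// ever serves the genesis hash, so the downloader should flag it as a
        	// stalling peer rather than wait indefinitely.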
  1288  
  1289  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1290  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1291  	}
  1292  }
  1293  
  1294  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1295  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1296  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1297  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1298  
  1299  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1300  	t.Parallel()
  1301  
  1302  	// Define the disconnection requirement for individual sync errors
  1303  	tests := []struct {
  1304  		result error
  1305  		drop   bool
  1306  	}{
  1307  		{nil, false},                        // Sync succeeded, all is well
  1308  		{errBusy, false},                    // Sync is already in progress, no problem
  1309  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1310  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1311  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1312  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1313  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1314  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1315  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1316  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1317  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1318  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1319  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1320  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1321  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1322  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1323  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1324  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1325  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1326  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1327  	}
  1328  	// Run the tests and check disconnection status
  1329  	tester := newTester()
  1330  	defer tester.terminate()
  1331  
  1332  	for i, tt := range tests {
  1333  		// Register a new peer and ensure its presence
  1334  		id := fmt.Sprintf("test %d", i)
  1335  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1336  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1337  		}
  1338  		if _, ok := tester.peerHashes[id]; !ok {
  1339  			t.Fatalf("test %d: registered peer not found", i)
  1340  		}
  1341  		// Simulate a synchronisation and check the required result
  1342  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1343  
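        		// Run a synchronisation cycle: the mock above short-circuits the real sync
        		// logic, so only the error-to-peer-drop mapping is exercised here.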
  1344  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1345  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1346  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1347  		}
  1348  	}
  1349  }
  1350  
  1351  // Tests that synchronisation progress (origin block number, current block number
  1352  // and highest block number) is tracked and updated correctly.
  1353  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1354  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1355  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1356  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1357  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1358  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1359  
  1360  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1361  	t.Parallel()
  1362  
  1363  	tester := newTester()
  1364  	defer tester.terminate()
  1365  
  1366  	// Create a small enough block chain to download
  1367  	targetBlocks := blockCacheItems - 15
  1368  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1369  
  1370  	// Set a sync init hook to catch progress changes
  1371  	starting := make(chan struct{})
  1372  	progress := make(chan struct{})
  1373  
  1374  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1375  		starting <- struct{}{}
  1376  		<-progress
  1377  	}
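        	// The hook parks the downloader right after the remote height is fetched,
        	// letting the test sample Progress() at deterministic points; the downloader
        	// resumes only once the test writes to the progress channel.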
  1378  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1379  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1380  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1381  	}
  1382  	// Synchronise half the blocks and check initial progress
  1383  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
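        	// hashes[targetBlocks/2:] is the older half of the chain (hashes are ordered
        	// newest-first), so this peer's head matches the targetBlocks/2+1 height
        	// expected in the initial progress check below.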
  1384  	pending := new(sync.WaitGroup)
  1385  	pending.Add(1)
  1386  
  1387  	go func() {
  1388  		defer pending.Done()
  1389  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1390  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1391  		}
  1392  	}()
  1393  	<-starting
  1394  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1395  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1396  	}
  1397  	progress <- struct{}{}
  1398  	pending.Wait()
  1399  
  1400  	// Synchronise all the blocks and check continuation progress
  1401  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1402  	pending.Add(1)
  1403  
  1404  	go func() {
  1405  		defer pending.Done()
  1406  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1407  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1408  		}
  1409  	}()
  1410  	<-starting
  1411  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1412  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1413  	}
  1414  	progress <- struct{}{}
  1415  	pending.Wait()
  1416  
  1417  	// Check final progress after successful sync
  1418  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1419  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1420  	}
  1421  }
  1422  
  1423  // Tests that synchronisation progress (origin block number and highest block
  1424  // number) is tracked and updated correctly in case of a fork (or manual head
  1425  // reversal).
  1426  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1427  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1428  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1429  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1430  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1431  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1432  
  1433  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1434  	t.Parallel()
  1435  
  1436  	tester := newTester()
  1437  	defer tester.terminate()
  1438  
  1439  	// Create a forked chain to simulate origin reversal
  1440  	common, fork := MaxHashFetch, 2*MaxHashFetch
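        	// The two branches share `common` ancestor blocks and then diverge for `fork`
        	// blocks each; the local variable deliberately shadows the imported common
        	// package, which is not otherwise used in this function.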
  1441  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1442  
  1443  	// Set a sync init hook to catch progress changes
  1444  	starting := make(chan struct{})
  1445  	progress := make(chan struct{})
  1446  
  1447  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1448  		starting <- struct{}{}
  1449  		<-progress
  1450  	}
  1451  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1452  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1453  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1454  	}
  1455  	// Synchronise with one of the forks and check progress
  1456  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1457  	pending := new(sync.WaitGroup)
  1458  	pending.Add(1)
  1459  
  1460  	go func() {
  1461  		defer pending.Done()
  1462  		if err := tester.sync("fork A", nil, mode); err != nil {
  1463  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1464  		}
  1465  	}()
  1466  	<-starting
  1467  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1468  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1469  	}
  1470  	progress <- struct{}{}
  1471  	pending.Wait()
  1472  
  1473  	// Simulate a successful sync above the fork
  1474  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
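        	// Bumping the origin to the previous height forces the next cycle to detect
        	// the fork and rewind the reported origin back to the common ancestor.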
  1475  
  1476  	// Synchronise with the second fork and check progress resets
  1477  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1478  	pending.Add(1)
  1479  
  1480  	go func() {
  1481  		defer pending.Done()
  1482  		if err := tester.sync("fork B", nil, mode); err != nil {
  1483  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1484  		}
  1485  	}()
  1486  	<-starting
  1487  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1488  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1489  	}
  1490  	progress <- struct{}{}
  1491  	pending.Wait()
  1492  
  1493  	// Check final progress after successful sync
  1494  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1495  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1496  	}
  1497  }
  1498  
  1499  // Tests that if synchronisation is aborted due to some failure, then the progress
  1500  // origin is not updated in the next sync cycle, as it should be considered the
  1501  // continuation of the previous sync and not a new instance.
  1502  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1503  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1504  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1505  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1506  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1507  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1508  
  1509  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1510  	t.Parallel()
  1511  
  1512  	tester := newTester()
  1513  	defer tester.terminate()
  1514  
  1515  	// Create a small enough block chain to download
  1516  	targetBlocks := blockCacheItems - 15
  1517  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1518  
  1519  	// Set a sync init hook to catch progress changes
  1520  	starting := make(chan struct{})
  1521  	progress := make(chan struct{})
  1522  
  1523  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1524  		starting <- struct{}{}
  1525  		<-progress
  1526  	}
  1527  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1528  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1529  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1530  	}
  1531  	// Attempt a full sync with a faulty peer
  1532  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1533  	missing := targetBlocks / 2
  1534  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1535  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1536  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1537  
  1538  	pending := new(sync.WaitGroup)
  1539  	pending.Add(1)
  1540  
  1541  	go func() {
  1542  		defer pending.Done()
  1543  		if err := tester.sync("faulty", nil, mode); err == nil {
  1544  			panic("succeeded faulty synchronisation")
  1545  		}
  1546  	}()
  1547  	<-starting
  1548  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1549  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1550  	}
  1551  	progress <- struct{}{}
  1552  	pending.Wait()
  1553  
  1554  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1555  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1556  	pending.Add(1)
  1557  
  1558  	go func() {
  1559  		defer pending.Done()
  1560  		if err := tester.sync("valid", nil, mode); err != nil {
  1561  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1562  		}
  1563  	}()
  1564  	<-starting
  1565  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1566  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1567  	}
  1568  	progress <- struct{}{}
  1569  	pending.Wait()
  1570  
  1571  	// Check final progress after successful sync
  1572  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1573  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1574  	}
  1575  }
  1576  
  1577  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1578  // the progress height is successfully reduced at the next sync invocation.
  1579  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1580  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1581  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1582  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1583  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1584  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1585  
  1586  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1587  	t.Parallel()
  1588  
  1589  	tester := newTester()
  1590  	defer tester.terminate()
  1591  
  1592  	// Create a small block chain
  1593  	targetBlocks := blockCacheItems - 15
  1594  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1595  
  1596  	// Set a sync init hook to catch progress changes
  1597  	starting := make(chan struct{})
  1598  	progress := make(chan struct{})
  1599  
  1600  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1601  		starting <- struct{}{}
  1602  		<-progress
  1603  	}
  1604  	// Retrieve the sync progress and ensure all values are zero (pristine sync)
  1605  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1606  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1607  	}
  1608  	// Create and sync with an attacker that promises a higher chain than available
  1609  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1610  	for i := 1; i < 3; i++ {
  1611  		delete(tester.peerHeaders["attack"], hashes[i])
  1612  		delete(tester.peerBlocks["attack"], hashes[i])
  1613  		delete(tester.peerReceipts["attack"], hashes[i])
  1614  	}
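        	// The attacker still advertises the full head (hashes[0]), promising a height
        	// of targetBlocks+3 that it can never back with the withheld blocks; the
        	// honest peer below serves hashes[3:], the true chain of height targetBlocks.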
  1615  
  1616  	pending := new(sync.WaitGroup)
  1617  	pending.Add(1)
  1618  
  1619  	go func() {
  1620  		defer pending.Done()
  1621  		if err := tester.sync("attack", nil, mode); err == nil {
  1622  			panic("succeeded attacker synchronisation")
  1623  		}
  1624  	}()
  1625  	<-starting
  1626  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1627  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1628  	}
  1629  	progress <- struct{}{}
  1630  	pending.Wait()
  1631  
  1632  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1633  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1634  	pending.Add(1)
  1635  
  1636  	go func() {
  1637  		defer pending.Done()
  1638  		if err := tester.sync("valid", nil, mode); err != nil {
  1639  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1640  		}
  1641  	}()
  1642  	<-starting
  1643  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1644  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1645  	}
  1646  	progress <- struct{}{}
  1647  	pending.Wait()
  1648  
  1649  	// Check final progress after successful sync
  1650  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1651  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1652  	}
  1653  }
  1654  
  1655  // This test reproduces an issue where unexpected deliveries would
  1656  // block indefinitely if they arrived at the right time.
  1657  // We use data-driven subtests to manage this so that it runs in parallel on its
  1658  // own and not with the other tests, avoiding intermittent failures.
  1659  func TestDeliverHeadersHang(t *testing.T) {
  1660  	testCases := []struct {
  1661  		protocol int
  1662  		syncMode SyncMode
  1663  	}{
  1664  		{62, FullSync},
  1665  		{63, FullSync},
  1666  		{63, FastSync},
  1667  		{64, FullSync},
  1668  		{64, FastSync},
  1669  		{64, LightSync},
  1670  	}
  1671  	for _, tc := range testCases {
  1672  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1673  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1674  		})
  1675  	}
  1676  }
  1677  
  1678  type floodingTestPeer struct {
  1679  	peer   Peer
  1680  	tester *downloadTester
  1681  	pend   sync.WaitGroup
  1682  }
  1683  
  1684  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1685  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1686  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1687  }
  1688  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1689  	return ftp.peer.RequestBodies(hashes)
  1690  }
  1691  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1692  	return ftp.peer.RequestReceipts(hashes)
  1693  }
  1694  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1695  	return ftp.peer.RequestNodeData(hashes)
  1696  }
  1697  
  1698  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1699  	deliveriesDone := make(chan struct{}, 500)
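        	// Race 500 unsolicited header deliveries from fake peers against the real
        	// response; if any of them blocks, the timeout below triggers the panic.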
  1700  	for i := 0; i < cap(deliveriesDone); i++ {
  1701  		peer := fmt.Sprintf("fake-peer%d", i)
  1702  		ftp.pend.Add(1)
  1703  
  1704  		go func() {
  1705  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1706  			deliveriesDone <- struct{}{}
  1707  			ftp.pend.Done()
  1708  		}()
  1709  	}
  1710  	// Deliver the actual requested headers.
  1711  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1712  	// None of the extra deliveries should block.
  1713  	timeout := time.After(60 * time.Second)
  1714  	for i := 0; i < cap(deliveriesDone); i++ {
  1715  		select {
  1716  		case <-deliveriesDone:
  1717  		case <-timeout:
  1718  			panic("blocked")
  1719  		}
  1720  	}
  1721  	return nil
  1722  }
  1723  
  1724  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1725  	t.Parallel()
  1726  
  1727  	master := newTester()
  1728  	defer master.terminate()
  1729  
  1730  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
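        	// The original hang was timing-dependent, so hammer the scenario repeatedly
        	// to give the race a realistic chance of surfacing.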
  1731  	for i := 0; i < 200; i++ {
  1732  		tester := newTester()
  1733  		tester.peerDb = master.peerDb
  1734  
  1735  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1736  		// Whenever the downloader requests headers, flood it with
  1737  		// a lot of unrequested header deliveries.
  1738  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1739  			peer:   tester.downloader.peers.peers["peer"].peer,
  1740  			tester: tester,
  1741  		}
  1742  		if err := tester.sync("peer", nil, mode); err != nil {
  1743  			t.Errorf("test %d: sync failed: %v", i, err)
  1744  		}
  1745  		tester.terminate()
  1746  
  1747  		// Flush all goroutines to prevent messing with subsequent tests
  1748  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1749  	}
  1750  }