github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/eth/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum/common"
    29  	"github.com/ethereum/go-ethereum/consensus/ethash"
    30  	"github.com/ethereum/go-ethereum/core"
    31  	"github.com/ethereum/go-ethereum/core/types"
    32  	"github.com/ethereum/go-ethereum/crypto"
    33  	"github.com/ethereum/go-ethereum/ethdb"
    34  	"github.com/ethereum/go-ethereum/event"
    35  	"github.com/ethereum/go-ethereum/params"
    36  	"github.com/ethereum/go-ethereum/trie"
    37  )
    38  
    39  var (
    40  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    41  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    42  )
    43  
    44  // Reduce some of the parameters to make the tester faster.
    45  func init() {
    46  	MaxForkAncestry = uint64(10000)
    47  	blockCacheItems = 1024
    48  	fsHeaderContCheck = 500 * time.Millisecond
    49  }
    50  
    51  // downloadTester is a test simulator for mocking out a local blockchain.
    52  type downloadTester struct {
    53  	downloader *Downloader
    54  
    55  	genesis *types.Block   // Genesis block used by the tester and peers
    56  	stateDb ethdb.Database // Database used by the tester for syncing from peers
    57  	peerDb  ethdb.Database // Database of the peers containing all data
    58  
    59  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    60  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    61  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    62  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    63  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    64  
    65  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    66  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    67  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    68  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    69  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    70  
    71  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    72  
    73  	lock sync.RWMutex
    74  }
    75  
    76  // newTester creates a new downloader test mocker.
    77  func newTester() *downloadTester {
    78  	testdb := ethdb.NewMemDatabase()
    79  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    80  
    81  	tester := &downloadTester{
    82  		genesis:           genesis,
    83  		peerDb:            testdb,
    84  		ownHashes:         []common.Hash{genesis.Hash()},
    85  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    86  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    87  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    88  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    89  		peerHashes:        make(map[string][]common.Hash),
    90  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    91  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    92  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    93  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    94  		peerMissingStates: make(map[string]map[common.Hash]bool),
    95  	}
    96  	tester.stateDb = ethdb.NewMemDatabase()
    97  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    98  
    99  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   100  
   101  	return tester
   102  }
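
// testerLifecycleSketch is an illustrative sketch (not part of the original
// suite) showing the lifecycle most tests below follow: build a chain with
// makeChain, register it under a peer id, sync against that peer and assert
// the resulting local chain. The chain length 128 is an arbitrary choice.
func testerLifecycleSketch(t *testing.T) {
	tester := newTester()
	defer tester.terminate()

	// Generate a short chain rooted at the genesis block
	hashes, headers, blocks, receipts := tester.makeChain(128, 0, tester.genesis, nil, false)
	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)

	// Run a full sync and verify that all 128 blocks (plus genesis) arrived
	if err := tester.sync("peer", nil, FullSync); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, 128+1)
}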
   103  
   104  // makeChain creates a chain of n blocks starting at and including parent.
   105  // The returned hash chain is ordered head->parent. In addition, every 3rd block
   106  // contains a transaction and every 5th an uncle to allow testing correct block
   107  // reassembly.
   108  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   109  	// Generate the block chain
   110  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
   111  		block.SetCoinbase(common.Address{seed})
   112  
   113  		// If a heavy chain is requested, delay blocks to raise difficulty
   114  		if heavy {
   115  			block.OffsetTime(-1)
   116  		}
   117  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   118  		if parent == dl.genesis && i%3 == 0 {
   119  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   120  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   121  			if err != nil {
   122  				panic(err)
   123  			}
   124  			block.AddTx(tx)
   125  		}
   126  		// If the block number is a multiple of 5, add a bonus uncle to the block
   127  		if i > 0 && i%5 == 0 {
   128  			block.AddUncle(&types.Header{
   129  				ParentHash: block.PrevBlock(i - 1).Hash(),
   130  				Number:     big.NewInt(block.Number().Int64() - 1),
   131  			})
   132  		}
   133  	})
   134  	// Convert the block-chain into a hash-chain and header/block maps
   135  	hashes := make([]common.Hash, n+1)
   136  	hashes[len(hashes)-1] = parent.Hash()
   137  
   138  	headerm := make(map[common.Hash]*types.Header, n+1)
   139  	headerm[parent.Hash()] = parent.Header()
   140  
   141  	blockm := make(map[common.Hash]*types.Block, n+1)
   142  	blockm[parent.Hash()] = parent
   143  
   144  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   145  	receiptm[parent.Hash()] = parentReceipts
   146  
   147  	for i, b := range blocks {
   148  		hashes[len(hashes)-i-2] = b.Hash()
   149  		headerm[b.Hash()] = b.Header()
   150  		blockm[b.Hash()] = b
   151  		receiptm[b.Hash()] = receipts[i]
   152  	}
   153  	return hashes, headerm, blockm, receiptm
   154  }
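
// Ordering sketch (illustrative): for n = 3 and parent P with generated
// children B1 -> B2 -> B3, the values returned by makeChain satisfy
//
//	hashes  = [B3.Hash(), B2.Hash(), B1.Hash(), P.Hash()] // head -> parent
//	headerm[B2.Hash()]  == B2.Header()
//	receiptm[P.Hash()]  == parentReceipts
//
// i.e. index 0 is always the chain head and the last index the parent.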
   155  
   156  // makeChainFork creates two chains of length n, such that h1[:f] and
   157  // h2[:f] are different but have a common suffix of length n-f.
   158  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   159  	// Create the common suffix
   160  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   161  
   162  	// Create the forks, making the second heavier if non-balanced forks were requested
   163  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   164  	hashes1 = append(hashes1, hashes[1:]...)
   165  
   166  	heavy := false
   167  	if !balanced {
   168  		heavy = true
   169  	}
   170  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   171  	hashes2 = append(hashes2, hashes[1:]...)
   172  
   173  	for hash, header := range headers {
   174  		headers1[hash] = header
   175  		headers2[hash] = header
   176  	}
   177  	for hash, block := range blocks {
   178  		blocks1[hash] = block
   179  		blocks2[hash] = block
   180  	}
   181  	for hash, receipt := range receipts {
   182  		receipts1[hash] = receipt
   183  		receipts2[hash] = receipt
   184  	}
   185  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   186  }
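
// Fork shape sketch (illustrative): with n = 5 and f = 2 the common prefix
// C1 -> C2 -> C3 is built first, then two forks A1 -> A2 and B1 -> B2 are
// grown on top of C3, yielding (in head -> parent ordering)
//
//	hashes1 = [A2, A1, C3, C2, C1, parent]
//	hashes2 = [B2, B1, C3, C2, C1, parent]
//
// so the two chains differ in their first f entries and share the rest.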
   187  
   188  // terminate aborts any operations on the embedded downloader and releases all
   189  // held resources.
   190  func (dl *downloadTester) terminate() {
   191  	dl.downloader.Terminate()
   192  }
   193  
   194  // sync starts synchronizing with a remote peer, blocking until it completes.
   195  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   196  	dl.lock.RLock()
   197  	hash := dl.peerHashes[id][0]
   198  	// If no particular TD was requested, load from the peer's blockchain
   199  	if td == nil {
   200  		td = big.NewInt(1)
   201  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   202  			td = diff
   203  		}
   204  	}
   205  	dl.lock.RUnlock()
   206  
   207  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   208  	err := dl.downloader.synchronise(id, hash, td, mode)
   209  	select {
   210  	case <-dl.downloader.cancelCh:
   211  		// Ok, downloader fully cancelled after sync cycle
   212  	default:
   213  		// Downloader is still accepting packets, which can tie up a peer
   214  		panic("downloader active post sync cycle") // panic will be caught by tester
   215  	}
   216  	return err
   217  }
   218  
   219  // HasHeader checks if a header is present in the tester's canonical chain.
   220  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   221  	return dl.GetHeaderByHash(hash) != nil
   222  }
   223  
   224  // HasBlock checks if a block is present in the tester's canonical chain.
   225  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   226  	return dl.GetBlockByHash(hash) != nil
   227  }
   228  
   229  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   230  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   231  	dl.lock.RLock()
   232  	defer dl.lock.RUnlock()
   233  
   234  	return dl.ownHeaders[hash]
   235  }
   236  
   237  // GetBlockByHash retrieves a block from the tester's canonical chain.
   238  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   239  	dl.lock.RLock()
   240  	defer dl.lock.RUnlock()
   241  
   242  	return dl.ownBlocks[hash]
   243  }
   244  
   245  // CurrentHeader retrieves the current head header from the canonical chain.
   246  func (dl *downloadTester) CurrentHeader() *types.Header {
   247  	dl.lock.RLock()
   248  	defer dl.lock.RUnlock()
   249  
   250  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   251  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   252  			return header
   253  		}
   254  	}
   255  	return dl.genesis.Header()
   256  }
   257  
   258  // CurrentBlock retrieves the current head block from the canonical chain.
   259  func (dl *downloadTester) CurrentBlock() *types.Block {
   260  	dl.lock.RLock()
   261  	defer dl.lock.RUnlock()
   262  
   263  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   264  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   265  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   266  				return block
   267  			}
   268  		}
   269  	}
   270  	return dl.genesis
   271  }
   272  
   273  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   274  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   275  	dl.lock.RLock()
   276  	defer dl.lock.RUnlock()
   277  
   278  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   279  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   280  			return block
   281  		}
   282  	}
   283  	return dl.genesis
   284  }
   285  
   286  // FastSyncCommitHead manually sets the head block to a given hash.
   287  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   288  	// For now only check that the state trie is correct
   289  	if block := dl.GetBlockByHash(hash); block != nil {
   290  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
   291  		return err
   292  	}
   293  	return fmt.Errorf("non existent block: %x", hash[:4])
   294  }
   295  
   296  // GetTd retrieves the block's total difficulty from the canonical chain.
   297  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   298  	dl.lock.RLock()
   299  	defer dl.lock.RUnlock()
   300  
   301  	return dl.ownChainTd[hash]
   302  }
   303  
   304  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   305  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   306  	dl.lock.Lock()
   307  	defer dl.lock.Unlock()
   308  
   309  	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
   310  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   311  		return 0, errors.New("unknown parent")
   312  	}
   313  	for i := 1; i < len(headers); i++ {
   314  		if headers[i].ParentHash != headers[i-1].Hash() {
   315  			return i, errors.New("unknown parent")
   316  		}
   317  	}
   318  	// Do a full insert if pre-checks passed
   319  	for i, header := range headers {
   320  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   321  			continue
   322  		}
   323  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   324  			return i, errors.New("unknown parent")
   325  		}
   326  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   327  		dl.ownHeaders[header.Hash()] = header
   328  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   329  	}
   330  	return len(headers), nil
   331  }
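
// TD bookkeeping sketch (illustrative): the total difficulty accumulates
// along the parent chain, so for every inserted header H with parent P
//
//	ownChainTd[H.Hash()] = ownChainTd[P.Hash()] + H.Difficulty
//
// e.g. a genesis TD of 1 followed by three headers of difficulty 2 each
// leaves the head at TD 1+2+2+2 = 7.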
   332  
   333  // InsertChain injects a new batch of blocks into the simulated chain.
   334  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   335  	dl.lock.Lock()
   336  	defer dl.lock.Unlock()
   337  
   338  	for i, block := range blocks {
   339  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   340  			return i, errors.New("unknown parent")
   341  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   342  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   343  		}
   344  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   345  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   346  			dl.ownHeaders[block.Hash()] = block.Header()
   347  		}
   348  		dl.ownBlocks[block.Hash()] = block
   349  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   350  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   351  	}
   352  	return len(blocks), nil
   353  }
   354  
   355  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   356  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   357  	dl.lock.Lock()
   358  	defer dl.lock.Unlock()
   359  
   360  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   361  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   362  			return i, errors.New("unknown owner")
   363  		}
   364  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   365  			return i, errors.New("unknown parent")
   366  		}
   367  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   368  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   369  	}
   370  	return len(blocks), nil
   371  }
   372  
   373  // Rollback removes some recently added elements from the chain.
   374  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   375  	dl.lock.Lock()
   376  	defer dl.lock.Unlock()
   377  
   378  	for i := len(hashes) - 1; i >= 0; i-- {
   379  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   380  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   381  		}
   382  		delete(dl.ownChainTd, hashes[i])
   383  		delete(dl.ownHeaders, hashes[i])
   384  		delete(dl.ownReceipts, hashes[i])
   385  		delete(dl.ownBlocks, hashes[i])
   386  	}
   387  }
   388  
   389  // newPeer registers a new block download source into the downloader.
   390  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   391  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   392  }
   393  
   394  // newSlowPeer registers a new block download source into the downloader, with a
   395  // specific delay time on processing the network packets sent to it, simulating
   396  // potentially slow network IO.
   397  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   398  	dl.lock.Lock()
   399  	defer dl.lock.Unlock()
   400  
   401  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   402  	if err == nil {
   403  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   404  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   405  		copy(dl.peerHashes[id], hashes)
   406  
   407  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   408  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   409  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   410  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   411  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   412  
   413  		genesis := hashes[len(hashes)-1]
   414  		if header := headers[genesis]; header != nil {
   415  			dl.peerHeaders[id][genesis] = header
   416  			dl.peerChainTds[id][genesis] = header.Difficulty
   417  		}
   418  		if block := blocks[genesis]; block != nil {
   419  			dl.peerBlocks[id][genesis] = block
   420  			dl.peerChainTds[id][genesis] = block.Difficulty()
   421  		}
   422  
   423  		for i := len(hashes) - 2; i >= 0; i-- {
   424  			hash := hashes[i]
   425  
   426  			if header, ok := headers[hash]; ok {
   427  				dl.peerHeaders[id][hash] = header
   428  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   429  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   430  				}
   431  			}
   432  			if block, ok := blocks[hash]; ok {
   433  				dl.peerBlocks[id][hash] = block
   434  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   435  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   436  				}
   437  			}
   438  			if receipt, ok := receipts[hash]; ok {
   439  				dl.peerReceipts[id][hash] = receipt
   440  			}
   441  		}
   442  	}
   443  	return err
   444  }
   445  
   446  // dropPeer simulates a hard peer removal from the connection pool.
   447  func (dl *downloadTester) dropPeer(id string) {
   448  	dl.lock.Lock()
   449  	defer dl.lock.Unlock()
   450  
   451  	delete(dl.peerHashes, id)
   452  	delete(dl.peerHeaders, id)
   453  	delete(dl.peerBlocks, id)
   454  	delete(dl.peerChainTds, id)
   455  
   456  	dl.downloader.UnregisterPeer(id)
   457  }
   458  
   459  type downloadTesterPeer struct {
   460  	dl    *downloadTester
   461  	id    string
   462  	delay time.Duration
   463  	lock  sync.RWMutex
   464  }
   465  
   466  // setDelay is a thread-safe setter for the network delay value.
   467  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   468  	dlp.lock.Lock()
   469  	defer dlp.lock.Unlock()
   470  
   471  	dlp.delay = delay
   472  }
   473  
   474  // waitDelay is a thread-safe way to sleep for the configured time.
   475  func (dlp *downloadTesterPeer) waitDelay() {
   476  	dlp.lock.RLock()
   477  	delay := dlp.delay
   478  	dlp.lock.RUnlock()
   479  
   480  	time.Sleep(delay)
   481  }
   482  
   483  // Head retrieves the peer's current head hash and total
   484  // difficulty.
   485  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   486  	dlp.dl.lock.RLock()
   487  	defer dlp.dl.lock.RUnlock()
   488  
   489  	return dlp.dl.peerHashes[dlp.id][0], nil
   490  }
   491  
   492  // RequestHeadersByHash constructs a GetBlockHeaders request based on a hashed
   493  // origin, associated with a particular peer in the download tester. It can be
   494  // used to retrieve batches of headers from the particular peer.
   495  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   496  	// Find the canonical number of the hash
   497  	dlp.dl.lock.RLock()
   498  	number := uint64(0)
   499  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   500  		if hash == origin {
   501  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   502  			break
   503  		}
   504  	}
   505  	dlp.dl.lock.RUnlock()
   506  
   507  	// Use the absolute header fetcher to satisfy the query
   508  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   509  }
   510  
   511  // RequestHeadersByNumber constructs a GetBlockHeaders request based on a numbered
   512  // origin, associated with a particular peer in the download tester. It can be
   513  // used to retrieve batches of headers from the particular peer.
   514  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   515  	dlp.waitDelay()
   516  
   517  	dlp.dl.lock.RLock()
   518  	defer dlp.dl.lock.RUnlock()
   519  
   520  	// Gather the next batch of headers
   521  	hashes := dlp.dl.peerHashes[dlp.id]
   522  	headers := dlp.dl.peerHeaders[dlp.id]
   523  	result := make([]*types.Header, 0, amount)
   524  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   525  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   526  			result = append(result, header)
   527  		}
   528  	}
   529  	// Delay delivery a bit to allow attacks to unfold
   530  	go func() {
   531  		time.Sleep(time.Millisecond)
   532  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   533  	}()
   534  	return nil
   535  }
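
// Index arithmetic sketch (illustrative): peerHashes is ordered head ->
// genesis, so the block numbered origin lives at slice index
// len(hashes)-1-origin. With len(hashes) = 10, origin = 2, skip = 1 and
// amount = 3 the loop above visits
//
//	i=0: 10-2-1-0*(1+1) = 7 // block #2
//	i=1: 10-2-1-1*(1+1) = 5 // block #4
//	i=2: 10-2-1-2*(1+1) = 3 // block #6
//
// i.e. every (skip+1)-th header upwards from the requested origin.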
   536  
   537  // RequestBodies constructs a getBlockBodies method associated with a particular
   538  // peer in the download tester. It can be used to retrieve batches of block
   539  // bodies from the requested peer.
   540  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   541  	dlp.waitDelay()
   542  
   543  	dlp.dl.lock.RLock()
   544  	defer dlp.dl.lock.RUnlock()
   545  
   546  	blocks := dlp.dl.peerBlocks[dlp.id]
   547  
   548  	transactions := make([][]*types.Transaction, 0, len(hashes))
   549  	uncles := make([][]*types.Header, 0, len(hashes))
   550  
   551  	for _, hash := range hashes {
   552  		if block, ok := blocks[hash]; ok {
   553  			transactions = append(transactions, block.Transactions())
   554  			uncles = append(uncles, block.Uncles())
   555  		}
   556  	}
   557  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
   558  
   559  	return nil
   560  }
   561  
   562  // RequestReceipts constructs a getReceipts method associated with a particular
   563  // peer in the download tester. It can be used to retrieve batches of block
   564  // receipts from the requested peer.
   565  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   566  	dlp.waitDelay()
   567  
   568  	dlp.dl.lock.RLock()
   569  	defer dlp.dl.lock.RUnlock()
   570  
   571  	receipts := dlp.dl.peerReceipts[dlp.id]
   572  
   573  	results := make([][]*types.Receipt, 0, len(hashes))
   574  	for _, hash := range hashes {
   575  		if receipt, ok := receipts[hash]; ok {
   576  			results = append(results, receipt)
   577  		}
   578  	}
   579  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   580  
   581  	return nil
   582  }
   583  
   584  // RequestNodeData constructs a getNodeData method associated with a particular
   585  // peer in the download tester. It can be used to retrieve batches of node state
   586  // data from the requested peer.
   587  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   588  	dlp.waitDelay()
   589  
   590  	dlp.dl.lock.RLock()
   591  	defer dlp.dl.lock.RUnlock()
   592  
   593  	results := make([][]byte, 0, len(hashes))
   594  	for _, hash := range hashes {
   595  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   596  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   597  				results = append(results, data)
   598  			}
   599  		}
   600  	}
   601  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   602  
   603  	return nil
   604  }
   605  
   606  // assertOwnChain checks if the local chain contains the correct number of items
   607  // of the various chain components.
   608  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   609  	assertOwnForkedChain(t, tester, 1, []int{length})
   610  }
   611  
   612  // assertOwnForkedChain checks if the local forked chain contains the correct
   613  // number of items of the various chain components.
   614  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   615  	// Initialize the counters for the first fork
   616  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
   617  
   618  	if receipts < 0 {
   619  		receipts = 1
   620  	}
   621  	// Update the counters for each subsequent fork
   622  	for _, length := range lengths[1:] {
   623  		headers += length - common
   624  		blocks += length - common
   625  		receipts += length - common - fsMinFullBlocks
   626  	}
   627  	switch tester.downloader.mode {
   628  	case FullSync:
   629  		receipts = 1
   630  	case LightSync:
   631  		blocks, receipts = 1, 1
   632  	}
   633  	if hs := len(tester.ownHeaders); hs != headers {
   634  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   635  	}
   636  	if bs := len(tester.ownBlocks); bs != blocks {
   637  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   638  	}
   639  	if rs := len(tester.ownReceipts); rs != receipts {
   640  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   641  	}
   642  	// Verify the state trie too for fast syncs
   643  	/*if tester.downloader.mode == FastSync {
   644  		pivot := uint64(0)
   645  		var index int
   646  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   647  			index = pivot
   648  		} else {
   649  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   650  		}
   651  		if index > 0 {
   652  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   653  				t.Fatalf("state reconstruction failed: %v", err)
   654  			}
   655  		}
   656  	}*/
   657  }
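
// Counting sketch (illustrative): for a two-way fork with common = 4 and
// lengths = {10, 12} under FullSync, the expectations above work out to
//
//	headers  = 10 + (12 - 4) = 18
//	blocks   = 10 + (12 - 4) = 18
//	receipts = 1 // FullSync keeps only the genesis placeholder entry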
   658  
   659  // Tests that simple synchronization against a canonical chain works correctly.
   660  // In this test common ancestor lookup should be short-circuited and not require
   661  // binary searching.
   662  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   663  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   664  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   665  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   666  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   667  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   668  
   669  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   670  	t.Parallel()
   671  
   672  	tester := newTester()
   673  	defer tester.terminate()
   674  
   675  	// Create a small enough block chain to download
   676  	targetBlocks := blockCacheItems - 15
   677  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   678  
   679  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   680  
   681  	// Synchronise with the peer and make sure all relevant data was retrieved
   682  	if err := tester.sync("peer", nil, mode); err != nil {
   683  		t.Fatalf("failed to synchronise blocks: %v", err)
   684  	}
   685  	assertOwnChain(t, tester, targetBlocks+1)
   686  }
   687  
   688  // Tests that if a large batch of blocks is being downloaded, it is throttled
   689  // until the cached blocks are retrieved.
   690  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   691  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   692  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   693  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   694  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   695  
   696  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   697  	t.Parallel()
   698  	tester := newTester()
   699  	defer tester.terminate()
   700  
   701  	// Create a long block chain to download
   702  	targetBlocks := 8 * blockCacheItems
   703  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   704  
   705  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   706  
   707  	// Wrap the importer to allow stepping
   708  	blocked, proceed := uint32(0), make(chan struct{})
   709  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   710  		atomic.StoreUint32(&blocked, uint32(len(results)))
   711  		<-proceed
   712  	}
   713  	// Start a synchronisation concurrently
   714  	errc := make(chan error)
   715  	go func() {
   716  		errc <- tester.sync("peer", nil, mode)
   717  	}()
   718  	// Iteratively take some blocks, always checking the retrieval count
   719  	for {
   720  		// Check the retrieval count synchronously (the reason for this ugly block)
   721  		tester.lock.RLock()
   722  		retrieved := len(tester.ownBlocks)
   723  		tester.lock.RUnlock()
   724  		if retrieved >= targetBlocks+1 {
   725  			break
   726  		}
   727  		// Wait a bit for sync to throttle itself
   728  		var cached, frozen int
   729  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   730  			time.Sleep(25 * time.Millisecond)
   731  
   732  			tester.lock.Lock()
   733  			tester.downloader.queue.lock.Lock()
   734  			cached = len(tester.downloader.queue.blockDonePool)
   735  			if mode == FastSync {
   736  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   737  					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   738  					cached = receipts
   739  					//}
   740  				}
   741  			}
   742  			frozen = int(atomic.LoadUint32(&blocked))
   743  			retrieved = len(tester.ownBlocks)
   744  			tester.downloader.queue.lock.Unlock()
   745  			tester.lock.Unlock()
   746  
   747  			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   748  				break
   749  			}
   750  		}
   751  		// Make sure we filled up the cache, then exhaust it
   752  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   753  
   754  		tester.lock.RLock()
   755  		retrieved = len(tester.ownBlocks)
   756  		tester.lock.RUnlock()
   757  		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   758  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   759  		}
   760  		// Permit the blocked blocks to import
   761  		if atomic.LoadUint32(&blocked) > 0 {
   762  			atomic.StoreUint32(&blocked, uint32(0))
   763  			proceed <- struct{}{}
   764  		}
   765  	}
   766  	// Check that we haven't pulled more blocks than available
   767  	assertOwnChain(t, tester, targetBlocks+1)
   768  	if err := <-errc; err != nil {
   769  		t.Fatalf("block synchronization failed: %v", err)
   770  	}
   771  }
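
// Throttling invariant sketch (illustrative): at every pause point in the
// loop above either the result cache has filled up or everything outstanding
// is accounted for, e.g. with blockCacheItems = 1024 and targetBlocks = 8192
// one of
//
//	cached == 1024                      // cache at capacity
//	retrieved + cached + frozen == 8193 // all blocks imported, queued or blocked
//
// must hold (each side also allowing a reorgProtHeaderDelay-sized slack).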
   772  
   773  // Tests that simple synchronization against a forked chain works correctly. In
   774  // this test common ancestor lookup should *not* be short-circuited, and a full
   775  // binary search should be executed.
   776  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   777  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   778  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   779  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   780  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   781  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   782  
   783  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   784  	t.Parallel()
   785  
   786  	tester := newTester()
   787  	defer tester.terminate()
   788  
   789  	// Create a long enough forked chain
   790  	common, fork := MaxHashFetch, 2*MaxHashFetch
   791  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   792  
   793  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   794  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   795  
   796  	// Synchronise with the peer and make sure all blocks were retrieved
   797  	if err := tester.sync("fork A", nil, mode); err != nil {
   798  		t.Fatalf("failed to synchronise blocks: %v", err)
   799  	}
   800  	assertOwnChain(t, tester, common+fork+1)
   801  
   802  	// Synchronise with the second peer and make sure that the fork is pulled too
   803  	if err := tester.sync("fork B", nil, mode); err != nil {
   804  		t.Fatalf("failed to synchronise blocks: %v", err)
   805  	}
   806  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   807  }
   808  
   809  // Tests that synchronising against a much shorter but much heavier fork works
   810  // correctly and is not dropped.
   811  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   812  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   813  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   814  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   815  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   816  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   817  
   818  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   819  	t.Parallel()
   820  
   821  	tester := newTester()
   822  	defer tester.terminate()
   823  
   824  	// Create a long enough forked chain
   825  	common, fork := MaxHashFetch, 4*MaxHashFetch
   826  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   827  
   828  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   829  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   830  
   831  	// Synchronise with the peer and make sure all blocks were retrieved
   832  	if err := tester.sync("light", nil, mode); err != nil {
   833  		t.Fatalf("failed to synchronise blocks: %v", err)
   834  	}
   835  	assertOwnChain(t, tester, common+fork+1)
   836  
   837  	// Synchronise with the second peer and make sure that the fork is pulled too
   838  	if err := tester.sync("heavy", nil, mode); err != nil {
   839  		t.Fatalf("failed to synchronise blocks: %v", err)
   840  	}
   841  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   842  }
   843  
   844  // Tests that chain forks are contained within a certain interval of the current
   845  // chain head, ensuring that malicious peers cannot waste resources by feeding
   846  // long dead chains.
   847  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   848  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   849  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   850  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   851  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   852  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   853  
   854  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   855  	t.Parallel()
   856  
   857  	tester := newTester()
   858  	defer tester.terminate()
   859  
   860  	// Create a long enough forked chain
   861  	common, fork := 13, int(MaxForkAncestry+17)
   862  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   863  
   864  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   865  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   866  
   867  	// Synchronise with the peer and make sure all blocks were retrieved
   868  	if err := tester.sync("original", nil, mode); err != nil {
   869  		t.Fatalf("failed to synchronise blocks: %v", err)
   870  	}
   871  	assertOwnChain(t, tester, common+fork+1)
   872  
   873  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   874  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   875  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   876  	}
   877  }
   878  
   879  // Tests that chain forks are contained within a certain interval of the current
   880  // chain head for short but heavy forks too. These are a bit special because they
   881  // take different ancestor lookup paths.
   882  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   883  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   884  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   885  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   886  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   887  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   888  
   889  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   890  	t.Parallel()
   891  
   892  	tester := newTester()
   893  	defer tester.terminate()
   894  
   895  	// Create a long enough forked chain
   896  	common, fork := 13, int(MaxForkAncestry+17)
   897  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   898  
   899  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   900  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   901  
   902  	// Synchronise with the peer and make sure all blocks were retrieved
   903  	if err := tester.sync("original", nil, mode); err != nil {
   904  		t.Fatalf("failed to synchronise blocks: %v", err)
   905  	}
   906  	assertOwnChain(t, tester, common+fork+1)
   907  
   908  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   909  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   910  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   911  	}
   912  }
   913  
   914  // Tests that an inactive downloader will not accept incoming block headers and
   915  // bodies.
   916  func TestInactiveDownloader62(t *testing.T) {
   917  	t.Parallel()
   918  
   919  	tester := newTester()
   920  	defer tester.terminate()
   921  
   922  	// Check that neither block headers nor bodies are accepted
   923  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   924  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   925  	}
   926  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   927  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   928  	}
   929  }
   930  
   931  // Tests that an inactive downloader will not accept incoming block headers,
   932  // bodies and receipts.
   933  func TestInactiveDownloader63(t *testing.T) {
   934  	t.Parallel()
   935  
   936  	tester := newTester()
   937  	defer tester.terminate()
   938  
   939  	// Check that neither block headers, bodies nor receipts are accepted
   940  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   941  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   942  	}
   943  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
   944  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   945  	}
   946  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   947  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   948  	}
   949  }
   950  
   951  // Tests that a canceled download wipes all previously accumulated state.
   952  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   953  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   954  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   955  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   956  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   957  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   958  
   959  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   960  	t.Parallel()
   961  
   962  	tester := newTester()
   963  	defer tester.terminate()
   964  
   965  	// Create a small enough block chain to download
   966  	targetBlocks := blockCacheItems - 15
   967  	if targetBlocks >= MaxHashFetch {
   968  		targetBlocks = MaxHashFetch - 15
   969  	}
   970  	if targetBlocks >= MaxHeaderFetch {
   971  		targetBlocks = MaxHeaderFetch - 15
   972  	}
   973  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   974  
   975  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   976  
   977  	// Make sure canceling works with a pristine downloader
   978  	tester.downloader.Cancel()
   979  	if !tester.downloader.queue.Idle() {
   980  		t.Errorf("download queue not idle")
   981  	}
   982  	// Synchronise with the peer, but cancel afterwards
   983  	if err := tester.sync("peer", nil, mode); err != nil {
   984  		t.Fatalf("failed to synchronise blocks: %v", err)
   985  	}
   986  	tester.downloader.Cancel()
   987  	if !tester.downloader.queue.Idle() {
   988  		t.Errorf("download queue not idle")
   989  	}
   990  }
   991  
   992  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   993  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   994  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   995  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   996  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   997  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   998  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   999  
  1000  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
  1001  	t.Parallel()
  1002  
  1003  	tester := newTester()
  1004  	defer tester.terminate()
  1005  
  1006  	// Create various peers with various parts of the chain
  1007  	targetPeers := 8
  1008  	targetBlocks := targetPeers*blockCacheItems - 15
  1009  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1010  
  1011  	for i := 0; i < targetPeers; i++ {
  1012  		id := fmt.Sprintf("peer #%d", i)
  1013  		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
  1014  	}
  1015  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1016  		t.Fatalf("failed to synchronise blocks: %v", err)
  1017  	}
  1018  	assertOwnChain(t, tester, targetBlocks+1)
  1019  }
  1020  
  1021  // Tests that synchronisations behave well in multi-version protocol environments
  1022  // and do not wreak havoc on other nodes in the network.
  1023  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1024  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1025  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1026  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1027  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1028  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1029  
  1030  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1031  	t.Parallel()
  1032  
  1033  	tester := newTester()
  1034  	defer tester.terminate()
  1035  
  1036  	// Create a small enough block chain to download
  1037  	targetBlocks := blockCacheItems - 15
  1038  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1039  
  1040  	// Create peers of every type
  1041  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1042  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1043  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1044  
  1045  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1046  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1047  		t.Fatalf("failed to synchronise blocks: %v", err)
  1048  	}
  1049  	assertOwnChain(t, tester, targetBlocks+1)
  1050  
  1051  	// Check that no peers have been dropped off
  1052  	for _, version := range []int{62, 63, 64} {
  1053  		peer := fmt.Sprintf("peer %d", version)
  1054  		if _, ok := tester.peerHashes[peer]; !ok {
  1055  			t.Errorf("%s dropped", peer)
  1056  		}
  1057  	}
  1058  }
  1059  
  1060  // Tests that if a block is empty (e.g. header only), no body request should be
  1061  // made, and instead the header should be assembled into a whole block by itself.
  1062  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1063  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1064  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1065  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1066  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1067  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1068  
  1069  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1070  	t.Parallel()
  1071  
  1072  	tester := newTester()
  1073  	defer tester.terminate()
  1074  
  1075  	// Create a block chain to download
  1076  	targetBlocks := 2*blockCacheItems - 15
  1077  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1078  
  1079  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1080  
  1081  	// Instrument the downloader to signal body and receipt requests
  1082  	bodiesHave, receiptsHave := int32(0), int32(0)
  1083  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1084  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1085  	}
  1086  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1087  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1088  	}
  1089  	// Synchronise with the peer and make sure all blocks were retrieved
  1090  	if err := tester.sync("peer", nil, mode); err != nil {
  1091  		t.Fatalf("failed to synchronise blocks: %v", err)
  1092  	}
  1093  	assertOwnChain(t, tester, targetBlocks+1)
  1094  
  1095  	// Validate the number of block bodies that should have been requested
  1096  	bodiesNeeded, receiptsNeeded := 0, 0
  1097  	for _, block := range blocks {
  1098  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1099  			bodiesNeeded++
  1100  		}
  1101  	}
  1102  	for _, receipt := range receipts {
  1103  		if mode == FastSync && len(receipt) > 0 {
  1104  			receiptsNeeded++
  1105  		}
  1106  	}
  1107  	if int(bodiesHave) != bodiesNeeded {
  1108  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1109  	}
  1110  	if int(receiptsHave) != receiptsNeeded {
  1111  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1112  	}
  1113  }
  1114  
  1115  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1116  // stalling the downloader by feeding gapped header chains.
  1117  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1118  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1119  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1120  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1121  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1122  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1123  
  1124  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1125  	t.Parallel()
  1126  
  1127  	tester := newTester()
  1128  	defer tester.terminate()
  1129  
  1130  	// Create a small enough block chain to download
  1131  	targetBlocks := blockCacheItems - 15
  1132  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1133  
  1134  	// Attempt a full sync with an attacker feeding gapped headers
  1135  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1136  	missing := targetBlocks / 2
  1137  	delete(tester.peerHeaders["attack"], hashes[missing])
  1138  
  1139  	if err := tester.sync("attack", nil, mode); err == nil {
  1140  		t.Fatalf("succeeded attacker synchronisation")
  1141  	}
  1142  	// Synchronise with the valid peer and make sure sync succeeds
  1143  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1144  	if err := tester.sync("valid", nil, mode); err != nil {
  1145  		t.Fatalf("failed to synchronise blocks: %v", err)
  1146  	}
  1147  	assertOwnChain(t, tester, targetBlocks+1)
  1148  }
  1149  
  1150  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1151  // detects the invalid numbering.
  1152  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1153  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1154  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1155  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1156  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1157  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1158  
  1159  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1160  	t.Parallel()
  1161  
  1162  	tester := newTester()
  1163  	defer tester.terminate()
  1164  
  1165  	// Create a small enough block chain to download
  1166  	targetBlocks := blockCacheItems - 15
  1167  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1168  
  1169  	// Attempt a full sync with an attacker feeding shifted headers
  1170  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1171  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1172  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1173  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1174  
  1175  	if err := tester.sync("attack", nil, mode); err == nil {
  1176  		t.Fatalf("succeeded attacker synchronisation")
  1177  	}
  1178  	// Synchronise with the valid peer and make sure sync succeeds
  1179  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1180  	if err := tester.sync("valid", nil, mode); err != nil {
  1181  		t.Fatalf("failed to synchronise blocks: %v", err)
  1182  	}
  1183  	assertOwnChain(t, tester, targetBlocks+1)
  1184  }
  1185  
  1186  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1187  // for various failure scenarios. Afterwards a full sync is attempted to make
  1188  // sure no state was corrupted.
  1189  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1190  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1191  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1192  
  1193  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1194  	t.Parallel()
  1195  
  1196  	tester := newTester()
  1197  	defer tester.terminate()
  1198  
  1199  	// Create a small enough block chain to download
  1200  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1201  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1202  
  1203  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1204  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1205  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1206  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1207  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1208  
  1209  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1210  		t.Fatalf("succeeded fast attacker synchronisation")
  1211  	}
  1212  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1213  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1214  	}
  1215  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1216  // This should result in the last fsHeaderSafetyNet headers being rolled back,
  1217  // and the pivot point being reverted to a non-block status.
  1218  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1219  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1220  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1221  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1222  
  1223  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1224  		t.Fatalf("succeeded block attacker synchronisation")
  1225  	}
  1226  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1227  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1228  	}
  1229  	if mode == FastSync {
  1230  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1231  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1232  		}
  1233  	}
  1234  	// Attempt to sync with an attacker that withholds promised blocks after the
  1235  // fast sync pivot point. This could be an attempt to leave the node with a bad
  1236  	// but already imported pivot block.
  1237  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1238  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1239  
  1240  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1241  		for i := missing; i <= len(hashes); i++ {
  1242  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1243  		}
  1244  		tester.downloader.syncInitHook = nil
  1245  	}
  1246  
  1247  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1248  		t.Fatalf("succeeded withholding attacker synchronisation")
  1249  	}
  1250  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1251  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1252  	}
  1253  	if mode == FastSync {
  1254  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1255  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1256  		}
  1257  	}
  1258  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1259  	// rollback should also disable fast syncing for this process, verify that we
  1260  // did a fresh full sync. Note that we can't assert anything about the receipts
  1261  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1262  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1263  	if err := tester.sync("valid", nil, mode); err != nil {
  1264  		t.Fatalf("failed to synchronise blocks: %v", err)
  1265  	}
  1266  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1267  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1268  	}
  1269  	if mode != LightSync {
  1270  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1271  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1272  		}
  1273  	}
  1274  }
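
        // The rollback asserted above amounts to keeping a sliding window of
        // the most recently accepted headers and unwinding past it on failure.
        // A sketch of the window bookkeeping, assuming the real logic is
        // equivalent:
        //
        //	rollback := make([]*types.Header, 0, fsHeaderSafetyNet)
        //	for _, header := range accepted {
        //		if len(rollback) == fsHeaderSafetyNet {
        //			rollback = append(rollback[:0], rollback[1:]...) // drop the oldest
        //		}
        //		rollback = append(rollback, header)
        //	}
        //	// on a verification failure, unwind the local header chain to just
        //	// before rollback[0]; in fast sync, the pivot is reset as well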
  1275  
  1276  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1277  // afterwards by not sending any useful hashes.
  1278  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1279  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1280  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1281  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1282  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1283  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1284  
  1285  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1286  	t.Parallel()
  1287  
  1288  	tester := newTester()
  1289  	defer tester.terminate()
  1290  
  1291  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1292  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1293  
  1294  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1295  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1296  	}
  1297  }
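
        // The starvation check reduces to one invariant: a peer whose advertised
        // total difficulty exceeds ours must back it with headers. A sketch,
        // where delivered is assumed to count the useful headers received:
        //
        //	if delivered == 0 && advertisedTD.Cmp(localTD) > 0 {
        //		return errStallingPeer // promised work it never delivered
        //	}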
  1298  
  1299  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1300  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1301  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1302  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1303  
  1304  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1305  	t.Parallel()
  1306  
  1307  	// Define the disconnection requirement for individual hash fetch errors
  1308  	tests := []struct {
  1309  		result error
  1310  		drop   bool
  1311  	}{
  1312  		{nil, false},                        // Sync succeeded, all is well
  1313  		{errBusy, false},                    // Sync is already in progress, no problem
  1314  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1315  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1316  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1317  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1318  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1319  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1320  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1321  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1322  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1323  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1324  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1325  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1326  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1327  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1328  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1329  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1330  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1331  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1332  	}
  1333  	// Run the tests and check disconnection status
  1334  	tester := newTester()
  1335  	defer tester.terminate()
  1336  
  1337  	for i, tt := range tests {
  1338  		// Register a new peer and ensure its presence
  1339  		id := fmt.Sprintf("test %d", i)
  1340  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1341  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1342  		}
  1343  		if _, ok := tester.peerHashes[id]; !ok {
  1344  			t.Fatalf("test %d: registered peer not found", i)
  1345  		}
  1346  		// Simulate a synchronisation and check the required result
  1347  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1348  
  1349  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1350  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1351  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1352  		}
  1353  	}
  1354  }
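
        // The table encodes a single policy: drop the sync origin only for
        // errors that prove the origin itself misbehaved. One plausible shape
        // of that dispatch (a sketch, not the downloader's literal code):
        //
        //	switch err {
        //	case errBadPeer, errStallingPeer, errTimeout, errEmptyHeaderSet,
        //		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
        //		d.dropPeer(id) // the origin is provably at fault
        //	default:
        //		// cancellations, races and third-party faults spare the origin
        //	}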
  1355  
  1356  // Tests that synchronisation progress (origin block number, current block number
  1357  // and highest block number) is tracked and updated correctly.
  1358  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1359  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1360  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1361  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1362  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1363  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1364  
  1365  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1366  	t.Parallel()
  1367  
  1368  	tester := newTester()
  1369  	defer tester.terminate()
  1370  
  1371  	// Create a small enough block chain to download
  1372  	targetBlocks := blockCacheItems - 15
  1373  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1374  
  1375  	// Set a sync init hook to catch progress changes
  1376  	starting := make(chan struct{})
  1377  	progress := make(chan struct{})
  1378  
  1379  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1380  		starting <- struct{}{}
  1381  		<-progress
  1382  	}
  1383  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1384  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1385  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1386  	}
  1387  	// Synchronise half the blocks and check initial progress
  1388  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1389  	pending := new(sync.WaitGroup)
  1390  	pending.Add(1)
  1391  
  1392  	go func() {
  1393  		defer pending.Done()
  1394  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1395  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1396  		}
  1397  	}()
  1398  	<-starting
  1399  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1400  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1401  	}
  1402  	progress <- struct{}{}
  1403  	pending.Wait()
  1404  
  1405  	// Synchronise all the blocks and check continuation progress
  1406  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1407  	pending.Add(1)
  1408  
  1409  	go func() {
  1410  		defer pending.Done()
  1411  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1412  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1413  		}
  1414  	}()
  1415  	<-starting
  1416  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1417  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1418  	}
  1419  	progress <- struct{}{}
  1420  	pending.Wait()
  1421  
  1422  	// Check final progress after successful sync
  1423  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1424  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1425  	}
  1426  }
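
        // For context, a consumer of the same Progress() API can derive a
        // completion percentage from the three tracked numbers; a small sketch
        // (assumes CurrentBlock never trails StartingBlock mid-sync):
        //
        //	p := d.Progress()
        //	if p.HighestBlock > p.StartingBlock {
        //		done := float64(p.CurrentBlock - p.StartingBlock)
        //		total := float64(p.HighestBlock - p.StartingBlock)
        //		fmt.Printf("synced %.1f%%\n", 100*done/total)
        //	}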
  1427  
  1428  // Tests that synchronisation progress (origin block number and highest block
  1429  // number) is tracked and updated correctly in case of a fork (or manual head
  1430  // reversal).
  1431  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1432  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1433  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1434  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1435  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1436  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1437  
  1438  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1439  	t.Parallel()
  1440  
  1441  	tester := newTester()
  1442  	defer tester.terminate()
  1443  
  1444  	// Create a forked chain to simulate an origin reversal
  1445  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1446  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1447  
  1448  	// Set a sync init hook to catch progress changes
  1449  	starting := make(chan struct{})
  1450  	progress := make(chan struct{})
  1451  
  1452  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1453  		starting <- struct{}{}
  1454  		<-progress
  1455  	}
  1456  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1457  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1458  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1459  	}
  1460  	// Synchronise with one of the forks and check progress
  1461  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1462  	pending := new(sync.WaitGroup)
  1463  	pending.Add(1)
  1464  
  1465  	go func() {
  1466  		defer pending.Done()
  1467  		if err := tester.sync("fork A", nil, mode); err != nil {
  1468  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1469  		}
  1470  	}()
  1471  	<-starting
  1472  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1473  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1474  	}
  1475  	progress <- struct{}{}
  1476  	pending.Wait()
  1477  
  1478  	// Simulate a successful sync above the fork
  1479  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1480  
  1481  	// Synchronise with the second fork and check progress resets
  1482  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1483  	pending.Add(1)
  1484  
  1485  	go func() {
  1486  		defer pending.Done()
  1487  		if err := tester.sync("fork B", nil, mode); err != nil {
  1488  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1489  		}
  1490  	}()
  1491  	<-starting
  1492  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1493  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1494  	}
  1495  	progress <- struct{}{}
  1496  	pending.Wait()
  1497  
  1498  	// Check final progress after successful sync
  1499  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1500  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1501  	}
  1502  }
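
        // The forking assertions boil down to the origin being clamped back to
        // the common ancestor whenever a new sync starts below the previously
        // reached height; sketched with ancestor as the negotiated fork point:
        //
        //	if ancestor < d.syncStatsChainOrigin {
        //		d.syncStatsChainOrigin = ancestor // restart progress from the fork
        //	}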
  1503  
  1504  // Tests that if synchronisation is aborted due to some failure, then the progress
  1505  // origin is not updated in the next sync cycle, as it should be considered the
  1506  // continuation of the previous sync and not a new instance.
  1507  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1508  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1509  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1510  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1511  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1512  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1513  
  1514  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1515  	t.Parallel()
  1516  
  1517  	tester := newTester()
  1518  	defer tester.terminate()
  1519  
  1520  	// Create a small enough block chain to download
  1521  	targetBlocks := blockCacheItems - 15
  1522  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1523  
  1524  	// Set a sync init hook to catch progress changes
  1525  	starting := make(chan struct{})
  1526  	progress := make(chan struct{})
  1527  
  1528  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1529  		starting <- struct{}{}
  1530  		<-progress
  1531  	}
  1532  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1533  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1534  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1535  	}
  1536  	// Attempt a full sync with a faulty peer
  1537  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1538  	missing := targetBlocks / 2
  1539  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1540  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1541  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1542  
  1543  	pending := new(sync.WaitGroup)
  1544  	pending.Add(1)
  1545  
  1546  	go func() {
  1547  		defer pending.Done()
  1548  		if err := tester.sync("faulty", nil, mode); err == nil {
  1549  			panic("succeeded faulty synchronisation")
  1550  		}
  1551  	}()
  1552  	<-starting
  1553  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1554  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1555  	}
  1556  	progress <- struct{}{}
  1557  	pending.Wait()
  1558  
  1559  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1560  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1561  	pending.Add(1)
  1562  
  1563  	go func() {
  1564  		defer pending.Done()
  1565  		if err := tester.sync("valid", nil, mode); err != nil {
  1566  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1567  		}
  1568  	}()
  1569  	<-starting
  1570  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1571  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1572  	}
  1573  	progress <- struct{}{}
  1574  	pending.Wait()
  1575  
  1576  	// Check final progress after successful sync
  1577  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1578  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1579  	}
  1580  }
  1581  
  1582  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1583  // the progress height is successfully reduced at the next sync invocation.
  1584  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1585  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1586  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1587  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1588  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1589  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1590  
  1591  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1592  	t.Parallel()
  1593  
  1594  	tester := newTester()
  1595  	defer tester.terminate()
  1596  
  1597  	// Create a small block chain
  1598  	targetBlocks := blockCacheItems - 15
  1599  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1600  
  1601  	// Set a sync init hook to catch progress changes
  1602  	starting := make(chan struct{})
  1603  	progress := make(chan struct{})
  1604  
  1605  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1606  		starting <- struct{}{}
  1607  		<-progress
  1608  	}
  1609  	// Retrieve the sync progress and ensure all its fields are zero (pristine sync)
  1610  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1611  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1612  	}
  1613  	// Create and sync with an attacker that promises a higher chain than available
  1614  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1615  	for i := 1; i < 3; i++ {
  1616  		delete(tester.peerHeaders["attack"], hashes[i])
  1617  		delete(tester.peerBlocks["attack"], hashes[i])
  1618  		delete(tester.peerReceipts["attack"], hashes[i])
  1619  	}
  1620  
  1621  	pending := new(sync.WaitGroup)
  1622  	pending.Add(1)
  1623  
  1624  	go func() {
  1625  		defer pending.Done()
  1626  		if err := tester.sync("attack", nil, mode); err == nil {
  1627  			panic("succeeded attacker synchronisation")
  1628  		}
  1629  	}()
  1630  	<-starting
  1631  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1632  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1633  	}
  1634  	progress <- struct{}{}
  1635  	pending.Wait()
  1636  
  1637  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1638  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1639  	pending.Add(1)
  1640  
  1641  	go func() {
  1642  		defer pending.Done()
  1643  		if err := tester.sync("valid", nil, mode); err != nil {
  1644  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1645  		}
  1646  	}()
  1647  	<-starting
  1648  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1649  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1650  	}
  1651  	progress <- struct{}{}
  1652  	pending.Wait()
  1653  
  1654  	// Check final progress after successful sync
  1655  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1656  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1657  	}
  1658  }
  1659  
  1660  // This test reproduces an issue where unexpected deliveries would
  1661  // block indefinitely if they arrived at the right time.
  1662  // We use data-driven subtests so that the cases run in parallel with each
  1663  // other but not with the remaining tests, avoiding intermittent failures.
  1664  func TestDeliverHeadersHang(t *testing.T) {
  1665  	testCases := []struct {
  1666  		protocol int
  1667  		syncMode SyncMode
  1668  	}{
  1669  		{62, FullSync},
  1670  		{63, FullSync},
  1671  		{63, FastSync},
  1672  		{64, FullSync},
  1673  		{64, FastSync},
  1674  		{64, LightSync},
  1675  	}
  1676  	for _, tc := range testCases {
  1677  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1678  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1679  		})
  1680  	}
  1681  }
  1682  
  1683  type floodingTestPeer struct {
  1684  	peer   Peer
  1685  	tester *downloadTester
  1686  	pend   sync.WaitGroup
  1687  }
  1688  
  1689  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1690  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1691  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1692  }
  1693  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1694  	return ftp.peer.RequestBodies(hashes)
  1695  }
  1696  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1697  	return ftp.peer.RequestReceipts(hashes)
  1698  }
  1699  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1700  	return ftp.peer.RequestNodeData(hashes)
  1701  }
  1702  
  1703  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1704  	deliveriesDone := make(chan struct{}, 500)
  1705  	for i := 0; i < cap(deliveriesDone); i++ {
  1706  		peer := fmt.Sprintf("fake-peer%d", i)
  1707  		ftp.pend.Add(1)
  1708  
  1709  		go func() {
  1710  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1711  			deliveriesDone <- struct{}{}
  1712  			ftp.pend.Done()
  1713  		}()
  1714  	}
  1715  	// Deliver the actual requested headers.
  1716  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1717  	// None of the extra deliveries should block.
  1718  	timeout := time.After(60 * time.Second)
  1719  	for i := 0; i < cap(deliveriesDone); i++ {
  1720  		select {
  1721  		case <-deliveriesDone:
  1722  		case <-timeout:
  1723  			panic("blocked")
  1724  		}
  1725  	}
  1726  	return nil
  1727  }
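
        // The hang this peer provokes comes from deliveries blocking on a
        // channel after the sync they belonged to has gone away. The usual cure,
        // sketched with hypothetical channel names, is to pair every delivery
        // send with the cancel channel:
        //
        //	select {
        //	case destCh <- packet: // an active sync consumes the delivery
        //	case <-cancelCh: // otherwise give up instead of blocking forever
        //	}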
  1728  
  1729  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1730  	t.Parallel()
  1731  
  1732  	master := newTester()
  1733  	defer master.terminate()
  1734  
  1735  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1736  	for i := 0; i < 200; i++ {
  1737  		tester := newTester()
  1738  		tester.peerDb = master.peerDb
  1739  
  1740  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1741  		// Whenever the downloader requests headers, flood it with
  1742  		// a lot of unrequested header deliveries.
  1743  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1744  			peer:   tester.downloader.peers.peers["peer"].peer,
  1745  			tester: tester,
  1746  		}
  1747  		if err := tester.sync("peer", nil, mode); err != nil {
  1748  			t.Errorf("test %d: sync failed: %v", i, err)
  1749  		}
  1750  		tester.terminate()
  1751  
  1752  		// Flush all goroutines to prevent them from messing with subsequent tests
  1753  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1754  	}
  1755  }