github.com/n1ghtfa1l/go-vnt@v0.6.4-alpha.6/vnt/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	libp2p "github.com/libp2p/go-libp2p-peer"
    29  	"github.com/vntchain/go-vnt/common"
    30  	"github.com/vntchain/go-vnt/consensus/mock"
    31  	"github.com/vntchain/go-vnt/core"
    32  	"github.com/vntchain/go-vnt/core/types"
    33  	"github.com/vntchain/go-vnt/crypto"
    34  	"github.com/vntchain/go-vnt/event"
    35  	"github.com/vntchain/go-vnt/params"
    36  	"github.com/vntchain/go-vnt/trie"
    37  	"github.com/vntchain/go-vnt/vntdb"
    38  )
    39  
    40  var (
    41  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    42  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    43  )
    44  
    45  // Reduce some of the parameters to make the tester faster.
    46  func init() {
    47  	MaxForkAncestry = uint64(10000)
    48  	blockCacheItems = 1024
    49  	fsHeaderContCheck = 500 * time.Millisecond
    50  }
    51  
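         // A typical test in this file wires the helpers together roughly as
         // follows (a sketch mirroring testCanonicalSynchronisation below, with n
         // the desired chain length):
         //
         //	tester := newTester()
         //	defer tester.terminate()
         //	hashes, headers, blocks, receipts := tester.makeChain(n, 0, tester.genesis, nil, false)
         //	tester.newPeer("peer", 64, hashes, headers, blocks, receipts)
         //	if err := tester.sync("peer", nil, FullSync); err != nil {
         //		t.Fatalf("failed to synchronise blocks: %v", err)
         //	}
         //	assertOwnChain(t, tester, n+1)
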
     52  // downloadTester is a test simulator for mocking out the local block chain.
    53  type downloadTester struct {
    54  	downloader *Downloader
    55  
     56  	genesis *types.Block   // Genesis block used by the tester and peers
    57  	stateDb vntdb.Database // Database used by the tester for syncing from peers
    58  	peerDb  vntdb.Database // Database of the peers containing all data
    59  
    60  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    61  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    62  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    63  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    64  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    65  
    66  	peerHashes   map[libp2p.ID][]common.Hash                  // Hash chain belonging to different test peers
    67  	peerHeaders  map[libp2p.ID]map[common.Hash]*types.Header  // Headers belonging to different test peers
    68  	peerBlocks   map[libp2p.ID]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    69  	peerReceipts map[libp2p.ID]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    70  	peerChainTds map[libp2p.ID]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    71  
    72  	peerMissingStates map[libp2p.ID]map[common.Hash]bool // State entries that fast sync should not return
    73  
    74  	lock sync.RWMutex
    75  }
    76  
    77  // newTester creates a new downloader test mocker.
    78  func newTester() *downloadTester {
    79  	testdb := vntdb.NewMemDatabase()
    80  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    81  
    82  	tester := &downloadTester{
    83  		genesis:           genesis,
    84  		peerDb:            testdb,
    85  		ownHashes:         []common.Hash{genesis.Hash()},
    86  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    87  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    88  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    89  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    90  		peerHashes:        make(map[libp2p.ID][]common.Hash),
    91  		peerHeaders:       make(map[libp2p.ID]map[common.Hash]*types.Header),
    92  		peerBlocks:        make(map[libp2p.ID]map[common.Hash]*types.Block),
    93  		peerReceipts:      make(map[libp2p.ID]map[common.Hash]types.Receipts),
    94  		peerChainTds:      make(map[libp2p.ID]map[common.Hash]*big.Int),
    95  		peerMissingStates: make(map[libp2p.ID]map[common.Hash]bool),
    96  	}
    97  	tester.stateDb = vntdb.NewMemDatabase()
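         	// Mark the genesis state as present: the tester treats a block as
         	// processable once its state root exists as a key in stateDb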
    98  	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
    99  
   100  	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
   101  
   102  	return tester
   103  }
   104  
    105  // makeChain creates a chain of n blocks starting at and including parent.
    106  // The returned hash chain is ordered head->parent. In addition, every 3rd block
    107  // contains a transaction to allow testing correct block
    108  // reassembly.
   109  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   110  	// Generate the block chain
   111  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, mock.NewMock(), dl.peerDb, n, func(i int, block *core.BlockGen) {
   112  		block.SetCoinbase(common.Address{seed})
   113  
   114  		// If a heavy chain is requested, delay blocks to raise difficulty
   115  		if heavy {
   116  			block.OffsetTime(-1)
   117  		}
    118  		// If the block number is a multiple of 3, send a bonus transaction to the producer
   119  		if parent == dl.genesis && i%3 == 0 {
   120  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   121  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   122  			if err != nil {
   123  				panic(err)
   124  			}
   125  			block.AddTx(tx)
   126  		}
   127  	})
   128  	// Convert the block-chain into a hash-chain and header/block maps
   129  	hashes := make([]common.Hash, n+1)
   130  	hashes[len(hashes)-1] = parent.Hash()
   131  
   132  	headerm := make(map[common.Hash]*types.Header, n+1)
   133  	headerm[parent.Hash()] = parent.Header()
   134  
   135  	blockm := make(map[common.Hash]*types.Block, n+1)
   136  	blockm[parent.Hash()] = parent
   137  
   138  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   139  	receiptm[parent.Hash()] = parentReceipts
   140  
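         	// GenerateChain returns blocks ordered oldest->newest, so fill the hash
         	// chain backwards: the newest block ends up at index 0 (the head)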
   141  	for i, b := range blocks {
   142  		hashes[len(hashes)-i-2] = b.Hash()
   143  		headerm[b.Hash()] = b.Header()
   144  		blockm[b.Hash()] = b
   145  		receiptm[b.Hash()] = receipts[i]
   146  	}
   147  	return hashes, headerm, blockm, receiptm
   148  }
   149  
   150  // makeChainFork creates two chains of length n, such that h1[:f] and
   151  // h2[:f] are different but have a common suffix of length n-f.
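         // For example, n=10 and f=4 yields two 10-block chains that share their
         // first 6 blocks (plus the parent) and diverge in the final 4.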
   152  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   153  	// Create the common suffix
   154  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   155  
    156  	// Create the forks, making the second heavier if unbalanced forks were requested
   157  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   158  	hashes1 = append(hashes1, hashes[1:]...)
   159  
   160  	heavy := false
   161  	if !balanced {
   162  		heavy = true
   163  	}
   164  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   165  	hashes2 = append(hashes2, hashes[1:]...)
   166  
   167  	for hash, header := range headers {
   168  		headers1[hash] = header
   169  		headers2[hash] = header
   170  	}
   171  	for hash, block := range blocks {
   172  		blocks1[hash] = block
   173  		blocks2[hash] = block
   174  	}
   175  	for hash, receipt := range receipts {
   176  		receipts1[hash] = receipt
   177  		receipts2[hash] = receipt
   178  	}
   179  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   180  }
   181  
   182  // terminate aborts any operations on the embedded downloader and releases all
   183  // held resources.
   184  func (dl *downloadTester) terminate() {
   185  	dl.downloader.Terminate()
   186  }
   187  
   188  // sync starts synchronizing with a remote peer, blocking until it completes.
   189  func (dl *downloadTester) sync(id libp2p.ID, td *big.Int, mode SyncMode) error {
   190  	dl.lock.RLock()
   191  	hash := dl.peerHashes[id][0]
   192  	// If no particular TD was requested, load from the peer's blockchain
   193  	if td == nil {
   194  		td = big.NewInt(1)
   195  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   196  			td = diff
   197  		}
   198  	}
   199  	dl.lock.RUnlock()
   200  
   201  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   202  	err := dl.downloader.synchronise(id, hash, td, mode)
   203  	select {
   204  	case <-dl.downloader.cancelCh:
   205  		// Ok, downloader fully cancelled after sync cycle
   206  	default:
   207  		// Downloader is still accepting packets, can block a peer up
   208  		panic("downloader active post sync cycle") // panic will be caught by tester
   209  	}
   210  	return err
   211  }
   212  
    213  // HasHeader checks if a header is present in the tester's canonical chain.
   214  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   215  	return dl.GetHeaderByHash(hash) != nil
   216  }
   217  
    218  // HasBlock checks if a block is present in the tester's canonical chain.
   219  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   220  	return dl.GetBlockByHash(hash) != nil
   221  }
   222  
    223  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   224  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   225  	dl.lock.RLock()
   226  	defer dl.lock.RUnlock()
   227  
   228  	return dl.ownHeaders[hash]
   229  }
   230  
    231  // GetBlockByHash retrieves a block from the tester's canonical chain.
   232  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   233  	dl.lock.RLock()
   234  	defer dl.lock.RUnlock()
   235  
   236  	return dl.ownBlocks[hash]
   237  }
   238  
   239  // CurrentHeader retrieves the current head header from the canonical chain.
   240  func (dl *downloadTester) CurrentHeader() *types.Header {
   241  	dl.lock.RLock()
   242  	defer dl.lock.RUnlock()
   243  
   244  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   245  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   246  			return header
   247  		}
   248  	}
   249  	return dl.genesis.Header()
   250  }
   251  
   252  // CurrentBlock retrieves the current head block from the canonical chain.
   253  func (dl *downloadTester) CurrentBlock() *types.Block {
   254  	dl.lock.RLock()
   255  	defer dl.lock.RUnlock()
   256  
   257  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   258  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   259  			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
   260  				return block
   261  			}
   262  		}
   263  	}
   264  	return dl.genesis
   265  }
   266  
   267  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   268  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   269  	dl.lock.RLock()
   270  	defer dl.lock.RUnlock()
   271  
   272  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   273  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   274  			return block
   275  		}
   276  	}
   277  	return dl.genesis
   278  }
   279  
   280  // FastSyncCommitHead manually sets the head block to a given hash.
   281  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   282  	// For now only check that the state trie is correct
   283  	if block := dl.GetBlockByHash(hash); block != nil {
   284  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
   285  		return err
   286  	}
   287  	return fmt.Errorf("non existent block: %x", hash[:4])
   288  }
   289  
   290  // GetTd retrieves the block's total difficulty from the canonical chain.
   291  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   292  	dl.lock.RLock()
   293  	defer dl.lock.RUnlock()
   294  
   295  	return dl.ownChainTd[hash]
   296  }
   297  
   298  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   299  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   300  	dl.lock.Lock()
   301  	defer dl.lock.Unlock()
   302  
    303  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   304  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   305  		return 0, errors.New("unknown parent")
   306  	}
   307  	for i := 1; i < len(headers); i++ {
   308  		if headers[i].ParentHash != headers[i-1].Hash() {
   309  			return i, errors.New("unknown parent")
   310  		}
   311  	}
   312  	// Do a full insert if pre-checks passed
   313  	for i, header := range headers {
   314  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   315  			continue
   316  		}
   317  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   318  			return i, errors.New("unknown parent")
   319  		}
   320  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   321  		dl.ownHeaders[header.Hash()] = header
   322  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   323  	}
   324  	return len(headers), nil
   325  }
   326  
   327  // InsertChain injects a new batch of blocks into the simulated chain.
   328  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   329  	dl.lock.Lock()
   330  	defer dl.lock.Unlock()
   331  
   332  	for i, block := range blocks {
   333  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   334  			return i, errors.New("unknown parent")
   335  		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
   336  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   337  		}
   338  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   339  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   340  			dl.ownHeaders[block.Hash()] = block.Header()
   341  		}
   342  		dl.ownBlocks[block.Hash()] = block
   343  		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
   344  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   345  	}
   346  	return len(blocks), nil
   347  }
   348  
   349  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   350  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   351  	dl.lock.Lock()
   352  	defer dl.lock.Unlock()
   353  
   354  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   355  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   356  			return i, errors.New("unknown owner")
   357  		}
   358  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   359  			return i, errors.New("unknown parent")
   360  		}
   361  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   362  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   363  	}
   364  	return len(blocks), nil
   365  }
   366  
   367  // Rollback removes some recently added elements from the chain.
   368  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   369  	dl.lock.Lock()
   370  	defer dl.lock.Unlock()
   371  
   372  	for i := len(hashes) - 1; i >= 0; i-- {
   373  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   374  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   375  		}
   376  		delete(dl.ownChainTd, hashes[i])
   377  		delete(dl.ownHeaders, hashes[i])
   378  		delete(dl.ownReceipts, hashes[i])
   379  		delete(dl.ownBlocks, hashes[i])
   380  	}
   381  }
   382  
   383  // newPeer registers a new block download source into the downloader.
   384  func (dl *downloadTester) newPeer(id libp2p.ID, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   385  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
   386  }
   387  
   388  // newSlowPeer registers a new block download source into the downloader, with a
   389  // specific delay time on processing the network packets sent to it, simulating
   390  // potentially slow network IO.
   391  func (dl *downloadTester) newSlowPeer(id libp2p.ID, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
   392  	dl.lock.Lock()
   393  	defer dl.lock.Unlock()
   394  
   395  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
   396  	if err == nil {
   397  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   398  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   399  		copy(dl.peerHashes[id], hashes)
   400  
   401  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   402  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   403  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   404  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   405  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   406  
   407  		genesis := hashes[len(hashes)-1]
   408  		if header := headers[genesis]; header != nil {
   409  			dl.peerHeaders[id][genesis] = header
   410  			dl.peerChainTds[id][genesis] = header.Difficulty
   411  		}
   412  		if block := blocks[genesis]; block != nil {
   413  			dl.peerBlocks[id][genesis] = block
   414  			dl.peerChainTds[id][genesis] = block.Difficulty()
   415  		}
   416  
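         		// Walk from oldest to newest so each entry's TD can build on its parent's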
   417  		for i := len(hashes) - 2; i >= 0; i-- {
   418  			hash := hashes[i]
   419  
   420  			if header, ok := headers[hash]; ok {
   421  				dl.peerHeaders[id][hash] = header
   422  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   423  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   424  				}
   425  			}
   426  			if block, ok := blocks[hash]; ok {
   427  				dl.peerBlocks[id][hash] = block
   428  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   429  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   430  				}
   431  			}
   432  			if receipt, ok := receipts[hash]; ok {
   433  				dl.peerReceipts[id][hash] = receipt
   434  			}
   435  		}
   436  	}
   437  	return err
   438  }
   439  
   440  // dropPeer simulates a hard peer removal from the connection pool.
   441  func (dl *downloadTester) dropPeer(id libp2p.ID) {
   442  	dl.lock.Lock()
   443  	defer dl.lock.Unlock()
   444  
   445  	delete(dl.peerHashes, id)
   446  	delete(dl.peerHeaders, id)
   447  	delete(dl.peerBlocks, id)
   448  	delete(dl.peerChainTds, id)
   449  
   450  	dl.downloader.UnregisterPeer(id)
   451  }
   452  
   453  type downloadTesterPeer struct {
   454  	dl    *downloadTester
   455  	id    libp2p.ID
   456  	delay time.Duration
   457  	lock  sync.RWMutex
   458  }
   459  
    460  // setDelay is a thread-safe setter for the network delay value.
   461  func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
   462  	dlp.lock.Lock()
   463  	defer dlp.lock.Unlock()
   464  
   465  	dlp.delay = delay
   466  }
   467  
    468  // waitDelay is a thread-safe way to sleep for the configured time.
   469  func (dlp *downloadTesterPeer) waitDelay() {
   470  	dlp.lock.RLock()
   471  	delay := dlp.delay
   472  	dlp.lock.RUnlock()
   473  
   474  	time.Sleep(delay)
   475  }
   476  
    477  // Head retrieves the peer's current head hash and total difficulty. The
    478  // tester keeps no per-peer difficulty, so nil is returned for the TD.
   479  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   480  	dlp.dl.lock.RLock()
   481  	defer dlp.dl.lock.RUnlock()
   482  
   483  	return dlp.dl.peerHashes[dlp.id][0], nil
   484  }
   485  
    486  // RequestHeadersByHash serves a GetBlockHeaders request based on a hashed
    487  // origin, associated with a particular peer in the download tester. It is
    488  // used to retrieve batches of headers from the particular peer.
   489  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   490  	// Find the canonical number of the hash
   491  	dlp.dl.lock.RLock()
   492  	number := uint64(0)
   493  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   494  		if hash == origin {
   495  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   496  			break
   497  		}
   498  	}
   499  	dlp.dl.lock.RUnlock()
   500  
   501  	// Use the absolute header fetcher to satisfy the query
   502  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   503  }
   504  
    505  // RequestHeadersByNumber serves a GetBlockHeaders request based on a numbered
    506  // origin, associated with a particular peer in the download tester. It is
    507  // used to retrieve batches of headers from the particular peer.
   508  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   509  	dlp.waitDelay()
   510  
   511  	dlp.dl.lock.RLock()
   512  	defer dlp.dl.lock.RUnlock()
   513  
   514  	// Gather the next batch of headers
   515  	hashes := dlp.dl.peerHashes[dlp.id]
   516  	headers := dlp.dl.peerHeaders[dlp.id]
   517  	result := make([]*types.Header, 0, amount)
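         	// hashes is ordered head->parent, so block number origin maps to index
         	// len(hashes)-origin-1; stepping skip+1 entries towards index 0 yields
         	// headers with ascending numbers (the reverse flag is unused here)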
   518  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   519  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   520  			result = append(result, header)
   521  		}
   522  	}
   523  	// Delay delivery a bit to allow attacks to unfold
   524  	go func() {
   525  		time.Sleep(time.Millisecond)
   526  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   527  	}()
   528  	return nil
   529  }
   530  
    531  // RequestBodies serves a getBlockBodies request associated with a particular
    532  // peer in the download tester. It is used to retrieve batches of block bodies
    533  // from the particular peer.
   534  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   535  	dlp.waitDelay()
   536  
   537  	dlp.dl.lock.RLock()
   538  	defer dlp.dl.lock.RUnlock()
   539  
   540  	blocks := dlp.dl.peerBlocks[dlp.id]
   541  
   542  	transactions := make([][]*types.Transaction, 0, len(hashes))
   543  
   544  	for _, hash := range hashes {
   545  		if block, ok := blocks[hash]; ok {
   546  			transactions = append(transactions, block.Transactions())
   547  		}
   548  	}
   549  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions)
   550  
   551  	return nil
   552  }
   553  
    554  // RequestReceipts serves a getReceipts request associated with a particular
    555  // peer in the download tester. It is used to retrieve batches of block
    556  // receipts from the particular peer.
   557  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   558  	dlp.waitDelay()
   559  
   560  	dlp.dl.lock.RLock()
   561  	defer dlp.dl.lock.RUnlock()
   562  
   563  	receipts := dlp.dl.peerReceipts[dlp.id]
   564  
   565  	results := make([][]*types.Receipt, 0, len(hashes))
   566  	for _, hash := range hashes {
   567  		if receipt, ok := receipts[hash]; ok {
   568  			results = append(results, receipt)
   569  		}
   570  	}
   571  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   572  
   573  	return nil
   574  }
   575  
    576  // RequestNodeData serves a getNodeData request associated with a particular
    577  // peer in the download tester. It is used to retrieve batches of node state
    578  // data from the particular peer.
   579  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   580  	dlp.waitDelay()
   581  
   582  	dlp.dl.lock.RLock()
   583  	defer dlp.dl.lock.RUnlock()
   584  
   585  	results := make([][]byte, 0, len(hashes))
   586  	for _, hash := range hashes {
   587  		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
   588  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   589  				results = append(results, data)
   590  			}
   591  		}
   592  	}
   593  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   594  
   595  	return nil
   596  }
   597  
   598  // assertOwnChain checks if the local chain contains the correct number of items
   599  // of the various chain components.
   600  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   601  	assertOwnForkedChain(t, tester, 1, []int{length})
   602  }
   603  
   604  // assertOwnForkedChain checks if the local forked chain contains the correct
   605  // number of items of the various chain components.
   606  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   607  	// Initialize the counters for the first fork
   608  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
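         	// Receipts are only fetched for blocks below the fast-sync pivot; the
         	// final fsMinFullBlocks blocks are imported fully, hence the subtraction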
   609  
   610  	if receipts < 0 {
   611  		receipts = 1
   612  	}
   613  	// Update the counters for each subsequent fork
   614  	for _, length := range lengths[1:] {
   615  		headers += length - common
   616  		blocks += length - common
   617  		receipts += length - common - fsMinFullBlocks
   618  	}
   619  	switch tester.downloader.mode {
   620  	case FullSync:
   621  		receipts = 1
   622  	case LightSync:
   623  		blocks, receipts = 1, 1
   624  	}
   625  	if hs := len(tester.ownHeaders); hs != headers {
   626  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   627  	}
   628  	if bs := len(tester.ownBlocks); bs != blocks {
   629  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   630  	}
   631  	if rs := len(tester.ownReceipts); rs != receipts {
   632  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   633  	}
   634  	// Verify the state trie too for fast syncs
   635  	/*if tester.downloader.mode == FastSync {
   636  		pivot := uint64(0)
   637  		var index int
   638  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   639  			index = pivot
   640  		} else {
   641  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   642  		}
   643  		if index > 0 {
   644  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   645  				t.Fatalf("state reconstruction failed: %v", err)
   646  			}
   647  		}
   648  	}*/
   649  }
   650  
   651  // Tests that simple synchronization against a canonical chain works correctly.
   652  // In this test common ancestor lookup should be short circuited and not require
   653  // binary searching.
   654  func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
   655  func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
   656  func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
   657  func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
   658  func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
   659  func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
   660  
   661  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   662  	t.Parallel()
   663  
   664  	tester := newTester()
   665  	defer tester.terminate()
   666  
   667  	// Create a small enough block chain to download
   668  	targetBlocks := blockCacheItems - 15
   669  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   670  
   671  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   672  
   673  	// Synchronise with the peer and make sure all relevant data was retrieved
   674  	if err := tester.sync("peer", nil, mode); err != nil {
   675  		t.Fatalf("failed to synchronise blocks: %v", err)
   676  	}
   677  	assertOwnChain(t, tester, targetBlocks+1)
   678  }
   679  
    680  // Tests that if a large batch of blocks is being downloaded, it is throttled
    681  // until the cached blocks are retrieved.
   682  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   683  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   684  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   685  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   686  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   687  
   688  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   689  	t.Parallel()
   690  	tester := newTester()
   691  	defer tester.terminate()
   692  
    693  	// Create a long block chain to download
   694  	targetBlocks := 8 * blockCacheItems
   695  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   696  
   697  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   698  
   699  	// Wrap the importer to allow stepping
   700  	blocked, proceed := uint32(0), make(chan struct{})
   701  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   702  		atomic.StoreUint32(&blocked, uint32(len(results)))
   703  		<-proceed
   704  	}
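         	// Every import now parks on proceed, letting the test watch the cache
         	// fill up to blockCacheItems before stepping the importer forward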
   705  	// Start a synchronisation concurrently
   706  	errc := make(chan error)
   707  	go func() {
   708  		errc <- tester.sync("peer", nil, mode)
   709  	}()
   710  	// Iteratively take some blocks, always checking the retrieval count
   711  	for {
    712  		// Check the retrieval count synchronously (hence this ugly block)
   713  		tester.lock.RLock()
   714  		retrieved := len(tester.ownBlocks)
   715  		tester.lock.RUnlock()
   716  		if retrieved >= targetBlocks+1 {
   717  			break
   718  		}
   719  		// Wait a bit for sync to throttle itself
   720  		var cached, frozen int
   721  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   722  			time.Sleep(25 * time.Millisecond)
   723  
   724  			tester.lock.Lock()
   725  			tester.downloader.queue.lock.Lock()
   726  			cached = len(tester.downloader.queue.blockDonePool)
   727  			if mode == FastSync {
   728  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   729  					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   730  					cached = receipts
   731  					//}
   732  				}
   733  			}
   734  			frozen = int(atomic.LoadUint32(&blocked))
   735  			retrieved = len(tester.ownBlocks)
   736  			tester.downloader.queue.lock.Unlock()
   737  			tester.lock.Unlock()
   738  
   739  			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
   740  				break
   741  			}
   742  		}
   743  		// Make sure we filled up the cache, then exhaust it
   744  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   745  
   746  		tester.lock.RLock()
   747  		retrieved = len(tester.ownBlocks)
   748  		tester.lock.RUnlock()
   749  		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
   750  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   751  		}
   752  		// Permit the blocked blocks to import
   753  		if atomic.LoadUint32(&blocked) > 0 {
   754  			atomic.StoreUint32(&blocked, uint32(0))
   755  			proceed <- struct{}{}
   756  		}
   757  	}
   758  	// Check that we haven't pulled more blocks than available
   759  	assertOwnChain(t, tester, targetBlocks+1)
   760  	if err := <-errc; err != nil {
   761  		t.Fatalf("block synchronization failed: %v", err)
   762  	}
   763  }
   764  
   765  // Tests that simple synchronization against a forked chain works correctly. In
   766  // this test common ancestor lookup should *not* be short circuited, and a full
   767  // binary search should be executed.
   768  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   769  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   770  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   771  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   772  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   773  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   774  
   775  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   776  	t.Parallel()
   777  
   778  	tester := newTester()
   779  	defer tester.terminate()
   780  
   781  	// Create a long enough forked chain
   782  	common, fork := MaxHashFetch, 2*MaxHashFetch
   783  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   784  
   785  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   786  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   787  
   788  	// Synchronise with the peer and make sure all blocks were retrieved
   789  	if err := tester.sync("fork A", nil, mode); err != nil {
   790  		t.Fatalf("failed to synchronise blocks: %v", err)
   791  	}
   792  	assertOwnChain(t, tester, common+fork+1)
   793  
   794  	// Synchronise with the second peer and make sure that fork is pulled too
   795  	if err := tester.sync("fork B", nil, mode); err != nil {
   796  		t.Fatalf("failed to synchronise blocks: %v", err)
   797  	}
   798  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   799  }
   800  
    801  // Tests that synchronising against a much shorter but much heavier fork works
    802  // correctly and is not dropped.
   803  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   804  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   805  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   806  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   807  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   808  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   809  
   810  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   811  	t.Parallel()
   812  
   813  	tester := newTester()
   814  	defer tester.terminate()
   815  
   816  	// Create a long enough forked chain
   817  	common, fork := MaxHashFetch, 4*MaxHashFetch
   818  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   819  
   820  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   821  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   822  
   823  	// Synchronise with the peer and make sure all blocks were retrieved
   824  	if err := tester.sync("light", nil, mode); err != nil {
   825  		t.Fatalf("failed to synchronise blocks: %v", err)
   826  	}
   827  	assertOwnChain(t, tester, common+fork+1)
   828  
   829  	// Synchronise with the second peer and make sure that fork is pulled too
   830  	if err := tester.sync("heavy", nil, mode); err != nil {
   831  		t.Fatalf("failed to synchronise blocks: %v", err)
   832  	}
   833  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   834  }
   835  
   836  // Tests that chain forks are contained within a certain interval of the current
   837  // chain head, ensuring that malicious peers cannot waste resources by feeding
   838  // long dead chains.
   839  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   840  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   841  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   842  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   843  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   844  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   845  
   846  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   847  	t.Parallel()
   848  
   849  	tester := newTester()
   850  	defer tester.terminate()
   851  
   852  	// Create a long enough forked chain
   853  	common, fork := 13, int(MaxForkAncestry+17)
   854  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   855  
   856  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   857  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   858  
   859  	// Synchronise with the peer and make sure all blocks were retrieved
   860  	if err := tester.sync("original", nil, mode); err != nil {
   861  		t.Fatalf("failed to synchronise blocks: %v", err)
   862  	}
   863  	assertOwnChain(t, tester, common+fork+1)
   864  
    865  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   866  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   867  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   868  	}
   869  }
   870  
   871  // Tests that chain forks are contained within a certain interval of the current
   872  // chain head for short but heavy forks too. These are a bit special because they
   873  // take different ancestor lookup paths.
   874  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   875  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   876  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   877  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   878  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   879  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   880  
   881  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   882  	t.Parallel()
   883  
   884  	tester := newTester()
   885  	defer tester.terminate()
   886  
   887  	// Create a long enough forked chain
   888  	common, fork := 13, int(MaxForkAncestry+17)
   889  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   890  
   891  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   892  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   893  
   894  	// Synchronise with the peer and make sure all blocks were retrieved
   895  	if err := tester.sync("original", nil, mode); err != nil {
   896  		t.Fatalf("failed to synchronise blocks: %v", err)
   897  	}
   898  	assertOwnChain(t, tester, common+fork+1)
   899  
    900  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   901  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   902  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   903  	}
   904  }
   905  
   906  // Tests that an inactive downloader will not accept incoming block headers and
   907  // bodies.
   908  func TestInactiveDownloader62(t *testing.T) {
   909  	t.Parallel()
   910  
   911  	tester := newTester()
   912  	defer tester.terminate()
   913  
   914  	// Check that neither block headers nor bodies are accepted
   915  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   916  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   917  	}
   918  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
   919  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   920  	}
   921  }
   922  
   923  // Tests that an inactive downloader will not accept incoming block headers,
   924  // bodies and receipts.
   925  func TestInactiveDownloader63(t *testing.T) {
   926  	t.Parallel()
   927  
   928  	tester := newTester()
   929  	defer tester.terminate()
   930  
    931  	// Check that neither block headers, bodies nor receipts are accepted
   932  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   933  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   934  	}
   935  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
   936  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   937  	}
   938  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   939  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   940  	}
   941  }
   942  
   943  // Tests that a canceled download wipes all previously accumulated state.
   944  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   945  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   946  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   947  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   948  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   949  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   950  
   951  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   952  	t.Parallel()
   953  
   954  	tester := newTester()
   955  	defer tester.terminate()
   956  
    957  	// Create a small enough block chain to download
   958  	targetBlocks := blockCacheItems - 15
   959  	if targetBlocks >= MaxHashFetch {
   960  		targetBlocks = MaxHashFetch - 15
   961  	}
   962  	if targetBlocks >= MaxHeaderFetch {
   963  		targetBlocks = MaxHeaderFetch - 15
   964  	}
   965  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   966  
   967  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   968  
   969  	// Make sure canceling works with a pristine downloader
   970  	tester.downloader.Cancel()
   971  	if !tester.downloader.queue.Idle() {
   972  		t.Errorf("download queue not idle")
   973  	}
   974  	// Synchronise with the peer, but cancel afterwards
   975  	if err := tester.sync("peer", nil, mode); err != nil {
   976  		t.Fatalf("failed to synchronise blocks: %v", err)
   977  	}
   978  	tester.downloader.Cancel()
   979  	if !tester.downloader.queue.Idle() {
   980  		t.Errorf("download queue not idle")
   981  	}
   982  }
   983  
   984  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   985  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   986  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   987  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   988  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   989  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   990  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   991  
   992  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   993  	t.Parallel()
   994  
   995  	tester := newTester()
   996  	defer tester.terminate()
   997  
   998  	// Create various peers with various parts of the chain
   999  	targetPeers := 8
  1000  	targetBlocks := targetPeers*blockCacheItems - 15
  1001  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1002  
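         	// Each successive peer omits the newest i*blockCacheItems hashes, so the
         	// downloader is exercised against peers holding differing chain portions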
  1003  	for i := 0; i < targetPeers; i++ {
  1004  		id := libp2p.ID(fmt.Sprintf("peer #%d", i))
  1005  		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
  1006  	}
  1007  	if err := tester.sync("peer #0", nil, mode); err != nil {
  1008  		t.Fatalf("failed to synchronise blocks: %v", err)
  1009  	}
  1010  	assertOwnChain(t, tester, targetBlocks+1)
  1011  }
  1012  
   1013  // Tests that synchronisations behave well in multi-version protocol environments
   1014  // and do not wreak havoc on other nodes in the network.
  1015  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
  1016  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
  1017  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
  1018  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
  1019  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
  1020  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
  1021  
  1022  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
  1023  	t.Parallel()
  1024  
  1025  	tester := newTester()
  1026  	defer tester.terminate()
  1027  
  1028  	// Create a small enough block chain to download
  1029  	targetBlocks := blockCacheItems - 15
  1030  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1031  
  1032  	// Create peers of every type
  1033  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1034  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1035  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1036  
  1037  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1038  	if err := tester.sync(libp2p.ID(fmt.Sprintf("peer %d", protocol)), nil, mode); err != nil {
  1039  		t.Fatalf("failed to synchronise blocks: %v", err)
  1040  	}
  1041  	assertOwnChain(t, tester, targetBlocks+1)
  1042  
  1043  	// Check that no peers have been dropped off
  1044  	for _, version := range []int{62, 63, 64} {
  1045  		peer := libp2p.ID(fmt.Sprintf("peer %d", version))
  1046  		if _, ok := tester.peerHashes[peer]; !ok {
  1047  			t.Errorf("%s dropped", peer)
  1048  		}
  1049  	}
  1050  }
  1051  
  1052  // Tests that if a block is empty (e.g. header only), no body request should be
  1053  // made, and instead the header should be assembled into a whole block in itself.
  1054  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1055  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1056  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1057  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1058  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1059  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1060  
  1061  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1062  	t.Parallel()
  1063  
  1064  	tester := newTester()
  1065  	defer tester.terminate()
  1066  
  1067  	// Create a block chain to download
  1068  	targetBlocks := 2*blockCacheItems - 15
  1069  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1070  
  1071  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1072  
  1073  	// Instrument the downloader to signal body requests
  1074  	bodiesHave, receiptsHave := int32(0), int32(0)
  1075  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1076  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1077  	}
  1078  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1079  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1080  	}
  1081  	// Synchronise with the peer and make sure all blocks were retrieved
  1082  	if err := tester.sync("peer", nil, mode); err != nil {
  1083  		t.Fatalf("failed to synchronise blocks: %v", err)
  1084  	}
  1085  	assertOwnChain(t, tester, targetBlocks+1)
  1086  
  1087  	// Validate the number of block bodies that should have been requested
  1088  	bodiesNeeded, receiptsNeeded := 0, 0
  1089  	for _, block := range blocks {
  1090  		if mode != LightSync && block != tester.genesis && len(block.Transactions()) > 0 {
  1091  			bodiesNeeded++
  1092  		}
  1093  	}
  1094  	for _, receipt := range receipts {
  1095  		if mode == FastSync && len(receipt) > 0 {
  1096  			receiptsNeeded++
  1097  		}
  1098  	}
  1099  	if int(bodiesHave) != bodiesNeeded {
  1100  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1101  	}
  1102  	if int(receiptsHave) != receiptsNeeded {
  1103  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1104  	}
  1105  }
  1106  
  1107  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1108  // stalling the downloader by feeding gapped header chains.
  1109  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1110  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1111  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1112  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1113  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1114  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1115  
  1116  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1117  	t.Parallel()
  1118  
  1119  	tester := newTester()
  1120  	defer tester.terminate()
  1121  
  1122  	// Create a small enough block chain to download
  1123  	targetBlocks := blockCacheItems - 15
  1124  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1125  
  1126  	// Attempt a full sync with an attacker feeding gapped headers
  1127  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1128  	missing := targetBlocks / 2
  1129  	delete(tester.peerHeaders["attack"], hashes[missing])
  1130  
  1131  	if err := tester.sync("attack", nil, mode); err == nil {
  1132  		t.Fatalf("succeeded attacker synchronisation")
  1133  	}
  1134  	// Synchronise with the valid peer and make sure sync succeeds
  1135  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1136  	if err := tester.sync("valid", nil, mode); err != nil {
  1137  		t.Fatalf("failed to synchronise blocks: %v", err)
  1138  	}
  1139  	assertOwnChain(t, tester, targetBlocks+1)
  1140  }
  1141  
  1142  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1143  // detects the invalid numbering.
  1144  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1145  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1146  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1147  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1148  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1149  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1150  
  1151  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1152  	t.Parallel()
  1153  
  1154  	tester := newTester()
  1155  	defer tester.terminate()
  1156  
  1157  	// Create a small enough block chain to download
  1158  	targetBlocks := blockCacheItems - 15
  1159  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1160  
  1161  	// Attempt a full sync with an attacker feeding shifted headers
  1162  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1163  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1164  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1165  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1166  
  1167  	if err := tester.sync("attack", nil, mode); err == nil {
  1168  		t.Fatalf("succeeded attacker synchronisation")
  1169  	}
  1170  	// Synchronise with the valid peer and make sure sync succeeds
  1171  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1172  	if err := tester.sync("valid", nil, mode); err != nil {
  1173  		t.Fatalf("failed to synchronise blocks: %v", err)
  1174  	}
  1175  	assertOwnChain(t, tester, targetBlocks+1)
  1176  }
  1177  
  1178  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1179  // for various failure scenarios. Afterwards a full sync is attempted to make
  1180  // sure no state was corrupted.
  1181  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1182  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1183  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1184  
  1185  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1186  	t.Parallel()
  1187  
  1188  	tester := newTester()
  1189  	defer tester.terminate()
  1190  
  1191  	// Create a small enough block chain to download
  1192  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1193  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1194  
  1195  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1196  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1197  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1198  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1199  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1200  
  1201  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1202  		t.Fatalf("succeeded fast attacker synchronisation")
  1203  	}
  1204  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1205  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1206  	}
  1207  	// Attempt to sync with an attacker that feeds junk during the block import phase.
1208  	// This should result in the last fsHeaderSafetyNet headers being rolled
1209  	// back, and the pivot point being reverted to a non-block status.
  1210  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1211  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1212  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1213  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1214  
  1215  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1216  		t.Fatalf("succeeded block attacker synchronisation")
  1217  	}
  1218  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1219  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1220  	}
  1221  	if mode == FastSync {
  1222  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1223  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1224  		}
  1225  	}
  1226  	// Attempt to sync with an attacker that withholds promised blocks after the
1227  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1228  	// but already imported pivot block.
  1229  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1230  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1231  
  1232  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1233  		for i := missing; i <= len(hashes); i++ {
  1234  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1235  		}
  1236  		tester.downloader.syncInitHook = nil
  1237  	}
  1238  
  1239  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1240  		t.Fatalf("succeeded withholding attacker synchronisation")
  1241  	}
  1242  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1243  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1244  	}
  1245  	if mode == FastSync {
  1246  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1247  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1248  		}
  1249  	}
  1250  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1251  	// rollback should also disable fast syncing for this process, verify that we
  1252  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1253  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1254  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1255  	if err := tester.sync("valid", nil, mode); err != nil {
  1256  		t.Fatalf("failed to synchronise blocks: %v", err)
  1257  	}
  1258  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1259  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1260  	}
  1261  	if mode != LightSync {
  1262  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1263  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1264  		}
  1265  	}
  1266  }
  1267  
1268  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1269  // afterwards by not sending any useful hashes.
  1270  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1271  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1272  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1273  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1274  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1275  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1276  
  1277  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1278  	t.Parallel()
  1279  
  1280  	tester := newTester()
  1281  	defer tester.terminate()
  1282  
  1283  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1284  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1285  
  1286  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1287  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1288  	}
  1289  }
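
// For context, a hedged sketch of why the starvation attack gets a foothold
// at all: the decision to sync with a peer is driven by its advertised total
// difficulty, which an attacker is free to inflate without owning the blocks.
func shouldSyncWith(localTD, peerTD *big.Int) bool {
	// Only chains claiming more total difficulty than ours are worth pulling.
	return peerTD.Cmp(localTD) > 0
}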
  1290  
  1291  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1292  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1293  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1294  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1295  
  1296  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1297  	t.Parallel()
  1298  
  1299  	// Define the disconnection requirement for individual hash fetch errors
  1300  	tests := []struct {
  1301  		result error
  1302  		drop   bool
  1303  	}{
  1304  		{nil, false},                        // Sync succeeded, all is well
  1305  		{errBusy, false},                    // Sync is already in progress, no problem
  1306  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1307  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1308  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1309  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1310  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1311  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1312  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1313  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1314  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1315  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1316  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1317  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1318  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1319  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1320  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1321  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1322  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1323  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1324  	}
  1325  	// Run the tests and check disconnection status
  1326  	tester := newTester()
  1327  	defer tester.terminate()
  1328  
  1329  	for i, tt := range tests {
1330  		// Register a new peer and ensure its presence
  1331  		id := libp2p.ID(fmt.Sprintf("test %d", i))
  1332  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1333  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1334  		}
  1335  		if _, ok := tester.peerHashes[id]; !ok {
  1336  			t.Fatalf("test %d: registered peer not found", i)
  1337  		}
  1338  		// Simulate a synchronisation and check the required result
  1339  		tester.downloader.synchroniseMock = func(libp2p.ID, common.Hash) error { return tt.result }
  1340  
  1341  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1342  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1343  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1344  		}
  1345  	}
  1346  }
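
// A hedged distillation of the policy the table above encodes: only errors
// that implicate the syncing peer itself warrant a disconnect, while
// cancellations and soft races leave the peer registered. shouldDropPeer is
// a hypothetical helper for illustration, not part of the downloader's API.
func shouldDropPeer(err error) bool {
	switch err {
	case errBadPeer, errStallingPeer, errTimeout, errEmptyHeaderSet,
		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
		return true
	default:
		return false
	}
}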
  1347  
  1348  // Tests that synchronisation progress (origin block number, current block number
  1349  // and highest block number) is tracked and updated correctly.
  1350  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1351  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1352  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1353  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1354  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1355  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1356  
  1357  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1358  	t.Parallel()
  1359  
  1360  	tester := newTester()
  1361  	defer tester.terminate()
  1362  
  1363  	// Create a small enough block chain to download
  1364  	targetBlocks := blockCacheItems - 15
  1365  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1366  
  1367  	// Set a sync init hook to catch progress changes
  1368  	starting := make(chan struct{})
  1369  	progress := make(chan struct{})
  1370  
  1371  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1372  		starting <- struct{}{}
  1373  		<-progress
  1374  	}
1375  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1376  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1377  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1378  	}
  1379  	// Synchronise half the blocks and check initial progress
  1380  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1381  	pending := new(sync.WaitGroup)
  1382  	pending.Add(1)
  1383  
  1384  	go func() {
  1385  		defer pending.Done()
  1386  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1387  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1388  		}
  1389  	}()
  1390  	<-starting
  1391  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1392  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1393  	}
  1394  	progress <- struct{}{}
  1395  	pending.Wait()
  1396  
  1397  	// Synchronise all the blocks and check continuation progress
  1398  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1399  	pending.Add(1)
  1400  
  1401  	go func() {
  1402  		defer pending.Done()
  1403  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1404  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1405  		}
  1406  	}()
  1407  	<-starting
  1408  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1409  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1410  	}
  1411  	progress <- struct{}{}
  1412  	pending.Wait()
  1413  
  1414  	// Check final progress after successful sync
  1415  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1416  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1417  	}
  1418  }
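
// Usage sketch: how a consumer might turn the three tracked numbers into a
// rough completion percentage. Purely illustrative; the downloader only
// exposes the raw starting/current/highest values asserted above.
func syncPercent(starting, current, highest uint64) float64 {
	if highest <= starting {
		return 100 // no work outstanding against this target
	}
	return 100 * float64(current-starting) / float64(highest-starting)
}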
  1419  
  1420  // Tests that synchronisation progress (origin block number and highest block
  1421  // number) is tracked and updated correctly in case of a fork (or manual head
1422  // reversal).
  1423  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1424  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1425  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1426  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1427  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1428  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1429  
  1430  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1431  	t.Parallel()
  1432  
  1433  	tester := newTester()
  1434  	defer tester.terminate()
  1435  
1436  	// Create a forked chain to simulate an origin reversal
  1437  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1438  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1439  
  1440  	// Set a sync init hook to catch progress changes
  1441  	starting := make(chan struct{})
  1442  	progress := make(chan struct{})
  1443  
  1444  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1445  		starting <- struct{}{}
  1446  		<-progress
  1447  	}
1448  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1449  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1450  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1451  	}
  1452  	// Synchronise with one of the forks and check progress
  1453  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1454  	pending := new(sync.WaitGroup)
  1455  	pending.Add(1)
  1456  
  1457  	go func() {
  1458  		defer pending.Done()
  1459  		if err := tester.sync("fork A", nil, mode); err != nil {
  1460  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1461  		}
  1462  	}()
  1463  	<-starting
  1464  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1465  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1466  	}
  1467  	progress <- struct{}{}
  1468  	pending.Wait()
  1469  
  1470  	// Simulate a successful sync above the fork
  1471  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1472  
  1473  	// Synchronise with the second fork and check progress resets
  1474  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1475  	pending.Add(1)
  1476  
  1477  	go func() {
  1478  		defer pending.Done()
  1479  		if err := tester.sync("fork B", nil, mode); err != nil {
  1480  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1481  		}
  1482  	}()
  1483  	<-starting
  1484  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1485  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1486  	}
  1487  	progress <- struct{}{}
  1488  	pending.Wait()
  1489  
  1490  	// Check final progress after successful sync
  1491  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1492  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1493  	}
  1494  }
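
// Hedged sketch of the origin bookkeeping this test exercises: when a new
// cycle starts from a head below the previously recorded origin (a fork, or
// a manual head reversal), the origin is pulled back so progress never reads
// as running backwards past its own start. Illustrative, not downloader code.
func nextSyncOrigin(prevOrigin, newHead uint64) uint64 {
	if newHead < prevOrigin {
		return newHead
	}
	return prevOrigin
}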
  1495  
  1496  // Tests that if synchronisation is aborted due to some failure, then the progress
  1497  // origin is not updated in the next sync cycle, as it should be considered the
  1498  // continuation of the previous sync and not a new instance.
  1499  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1500  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1501  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1502  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1503  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1504  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1505  
  1506  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1507  	t.Parallel()
  1508  
  1509  	tester := newTester()
  1510  	defer tester.terminate()
  1511  
  1512  	// Create a small enough block chain to download
  1513  	targetBlocks := blockCacheItems - 15
  1514  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1515  
  1516  	// Set a sync init hook to catch progress changes
  1517  	starting := make(chan struct{})
  1518  	progress := make(chan struct{})
  1519  
  1520  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1521  		starting <- struct{}{}
  1522  		<-progress
  1523  	}
1524  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1525  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1526  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1527  	}
  1528  	// Attempt a full sync with a faulty peer
  1529  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1530  	missing := targetBlocks / 2
  1531  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1532  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1533  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1534  
  1535  	pending := new(sync.WaitGroup)
  1536  	pending.Add(1)
  1537  
  1538  	go func() {
  1539  		defer pending.Done()
  1540  		if err := tester.sync("faulty", nil, mode); err == nil {
  1541  			panic("succeeded faulty synchronisation")
  1542  		}
  1543  	}()
  1544  	<-starting
  1545  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1546  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1547  	}
  1548  	progress <- struct{}{}
  1549  	pending.Wait()
  1550  
1551  	// Synchronise with a good peer and check that the progress origin remains the same after a failure
  1552  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1553  	pending.Add(1)
  1554  
  1555  	go func() {
  1556  		defer pending.Done()
  1557  		if err := tester.sync("valid", nil, mode); err != nil {
  1558  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1559  		}
  1560  	}()
  1561  	<-starting
  1562  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1563  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1564  	}
  1565  	progress <- struct{}{}
  1566  	pending.Wait()
  1567  
  1568  	// Check final progress after successful sync
  1569  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1570  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1571  	}
  1572  }
  1573  
  1574  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1575  // the progress height is successfully reduced at the next sync invocation.
  1576  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1577  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1578  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1579  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1580  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1581  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1582  
  1583  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1584  	t.Parallel()
  1585  
  1586  	tester := newTester()
  1587  	defer tester.terminate()
  1588  
  1589  	// Create a small block chain
  1590  	targetBlocks := blockCacheItems - 15
  1591  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1592  
  1593  	// Set a sync init hook to catch progress changes
  1594  	starting := make(chan struct{})
  1595  	progress := make(chan struct{})
  1596  
  1597  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1598  		starting <- struct{}{}
  1599  		<-progress
  1600  	}
1601  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1602  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1603  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1604  	}
1605  	// Create and sync with an attacker that promises a higher chain than available
  1606  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1607  	for i := 1; i < 3; i++ {
  1608  		delete(tester.peerHeaders["attack"], hashes[i])
  1609  		delete(tester.peerBlocks["attack"], hashes[i])
  1610  		delete(tester.peerReceipts["attack"], hashes[i])
  1611  	}
  1612  
  1613  	pending := new(sync.WaitGroup)
  1614  	pending.Add(1)
  1615  
  1616  	go func() {
  1617  		defer pending.Done()
  1618  		if err := tester.sync("attack", nil, mode); err == nil {
  1619  			panic("succeeded attacker synchronisation")
  1620  		}
  1621  	}()
  1622  	<-starting
  1623  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1624  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1625  	}
  1626  	progress <- struct{}{}
  1627  	pending.Wait()
  1628  
  1629  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1630  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1631  	pending.Add(1)
  1632  
  1633  	go func() {
  1634  		defer pending.Done()
  1635  		if err := tester.sync("valid", nil, mode); err != nil {
  1636  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1637  		}
  1638  	}()
  1639  	<-starting
  1640  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1641  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1642  	}
  1643  	progress <- struct{}{}
  1644  	pending.Wait()
  1645  
  1646  	// Check final progress after successful sync
  1647  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1648  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1649  	}
  1650  }
  1651  
  1652  // This test reproduces an issue where unexpected deliveries would
  1653  // block indefinitely if they arrived at the right time.
1654  // We use data-driven subtests so that the cases run in parallel with each
1655  // other, but not with the other tests, avoiding intermittent failures.
  1656  func TestDeliverHeadersHang(t *testing.T) {
  1657  	testCases := []struct {
  1658  		protocol int
  1659  		syncMode SyncMode
  1660  	}{
  1661  		{62, FullSync},
  1662  		{63, FullSync},
  1663  		{63, FastSync},
  1664  		{64, FullSync},
  1665  		{64, FastSync},
  1666  		{64, LightSync},
  1667  	}
  1668  	for _, tc := range testCases {
  1669  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1670  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1671  		})
  1672  	}
  1673  }
  1674  
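// floodingTestPeer wraps a regular test peer and, whenever the downloader
// requests headers by number, first floods it with hundreds of concurrent
// unrequested deliveries, reproducing the hang described above.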
  1675  type floodingTestPeer struct {
  1676  	peer   Peer
  1677  	tester *downloadTester
  1678  	pend   sync.WaitGroup
  1679  }
  1680  
  1681  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1682  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1683  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1684  }
  1685  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1686  	return ftp.peer.RequestBodies(hashes)
  1687  }
  1688  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1689  	return ftp.peer.RequestReceipts(hashes)
  1690  }
  1691  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1692  	return ftp.peer.RequestNodeData(hashes)
  1693  }
  1694  
  1695  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1696  	deliveriesDone := make(chan struct{}, 500)
  1697  	for i := 0; i < cap(deliveriesDone); i++ {
  1698  		peer := libp2p.ID(fmt.Sprintf("fake-peer%d", i))
  1699  		ftp.pend.Add(1)
  1700  
  1701  		go func() {
  1702  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1703  			deliveriesDone <- struct{}{}
  1704  			ftp.pend.Done()
  1705  		}()
  1706  	}
  1707  	// Deliver the actual requested headers.
  1708  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1709  	// None of the extra deliveries should block.
  1710  	timeout := time.After(60 * time.Second)
  1711  	for i := 0; i < cap(deliveriesDone); i++ {
  1712  		select {
  1713  		case <-deliveriesDone:
  1714  		case <-timeout:
  1715  			panic("blocked")
  1716  		}
  1717  	}
  1718  	return nil
  1719  }
  1720  
  1721  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1722  	t.Parallel()
  1723  
  1724  	master := newTester()
  1725  	defer master.terminate()
  1726  
  1727  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1728  	for i := 0; i < 200; i++ {
  1729  		tester := newTester()
  1730  		tester.peerDb = master.peerDb
  1731  
  1732  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1733  		// Whenever the downloader requests headers, flood it with
  1734  		// a lot of unrequested header deliveries.
  1735  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1736  			peer:   tester.downloader.peers.peers["peer"].peer,
  1737  			tester: tester,
  1738  		}
  1739  		if err := tester.sync("peer", nil, mode); err != nil {
  1740  			t.Errorf("test %d: sync failed: %v", i, err)
  1741  		}
  1742  		tester.terminate()
  1743  
  1744  		// Flush all goroutines to prevent messing with subsequent tests
  1745  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1746  	}
  1747  }