github.com/bcskill/bcschain/v3@v3.4.9-beta2/eth/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/bcskill/bcschain/v3/common"
    29  	"github.com/bcskill/bcschain/v3/consensus/clique"
    30  	"github.com/bcskill/bcschain/v3/core"
    31  	"github.com/bcskill/bcschain/v3/core/types"
    32  	"github.com/bcskill/bcschain/v3/crypto"
    33  	"github.com/bcskill/bcschain/v3/ethdb"
    34  	"github.com/bcskill/bcschain/v3/params"
    35  	"github.com/bcskill/bcschain/v3/trie"
    36  )
    37  
    38  var (
    39  	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    40  	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
    41  )
    42  
    43  // Reduce some of the parameters to make the tester faster.
    44  func init() {
    45  	MaxForkAncestry = uint64(10000)
    46  	blockCacheItems = 1024
    47  	fsHeaderContCheck = 500 * time.Millisecond
    48  }
    49  
    50  // downloadTester is a test simulator for mocking out the local block chain.
    51  type downloadTester struct {
    52  	downloader *Downloader
    53  
    54  	genesis *types.Block    // Genesis block used by the tester and peers
    55  	stateDb common.Database // Database used by the tester for syncing from peers
    56  	peerDb  common.Database // Database of the peers containing all data
    57  
    58  	ownHashes   []common.Hash                  // Hash chain belonging to the tester
    59  	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    60  	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    61  	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    62  	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
    63  
    64  	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
    65  	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
    66  	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
    67  	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
    68  	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
    69  
    70  	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return
    71  
    72  	lock sync.RWMutex
    73  }
    74  
    75  // newTester creates a new downloader test mocker.
    76  func newTester() *downloadTester {
    77  	testdb := ethdb.NewMemDatabase()
    78  	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
    79  
    80  	tester := &downloadTester{
    81  		genesis:           genesis,
    82  		peerDb:            testdb,
    83  		ownHashes:         []common.Hash{genesis.Hash()},
    84  		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
    85  		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
    86  		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
    87  		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
    88  		peerHashes:        make(map[string][]common.Hash),
    89  		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
    90  		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
    91  		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
    92  		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
    93  		peerMissingStates: make(map[string]map[common.Hash]bool),
    94  	}
    95  	tester.stateDb = ethdb.NewMemDatabase()
    96  	tester.stateDb.GlobalTable().Put(genesis.Root().Bytes(), []byte{0x00})
    97  
    98  	tester.downloader = New(FullSync, tester.stateDb, new(core.InterfaceFeed), tester, nil, tester.dropPeer)
    99  
   100  	return tester
   101  }
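
// The lifecycle exercised by every test below, condensed into a minimal,
// illustrative sketch (this helper is not referenced by the suite): create a
// tester, derive a chain from its genesis, register the chain under a peer id
// and synchronise against that peer.
func exampleTesterLifecycle(t *testing.T) {
	tester := newTester()
	defer tester.terminate()

	// Build a tiny 8-block chain on top of the genesis
	hashes, headers, blocks, receipts := tester.makeChain(8, 0, tester.genesis, nil, false)
	if err := tester.newPeer("example", 64, hashes, headers, blocks, receipts); err != nil {
		t.Fatalf("failed to register peer: %v", err)
	}
	// Run a full sync and check the chain length (blocks + genesis)
	if err := tester.sync("example", nil, FullSync); err != nil {
		t.Fatalf("failed to synchronise: %v", err)
	}
	assertOwnChain(t, tester, 8+1)
}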
   102  
   103  // makeChain creates a chain of n blocks starting at and including parent.
   104  // The returned hash chain is ordered head->parent. In addition, every 3rd
   105  // block of a chain rooted directly at the genesis contains a bonus
   106  // transaction to allow testing correct block reassembly.
   107  func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   108  	// Generate the block chain
   109  	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, clique.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
   110  		block.SetCoinbase(common.Address{seed})
   111  
   112  		// If a heavy chain is requested, force a fixed difficulty to differentiate the fork
   113  		if heavy {
   114  			block.SetDifficulty(1)
   115  		}
   116  		// If the block number is a multiple of 3, send a bonus transaction to the miner
   117  		if parent == dl.genesis && i%3 == 0 {
   118  			signer := types.MakeSigner(params.TestChainConfig, block.Number())
   119  			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
   120  			if err != nil {
   121  				panic(err)
   122  			}
   123  			block.AddTx(tx)
   124  		}
   125  	})
   126  	// Convert the block-chain into a hash-chain and header/block maps
   127  	hashes := make([]common.Hash, n+1)
   128  	hashes[len(hashes)-1] = parent.Hash()
   129  
   130  	headerm := make(map[common.Hash]*types.Header, n+1)
   131  	headerm[parent.Hash()] = parent.Header()
   132  
   133  	blockm := make(map[common.Hash]*types.Block, n+1)
   134  	blockm[parent.Hash()] = parent
   135  
   136  	receiptm := make(map[common.Hash]types.Receipts, n+1)
   137  	receiptm[parent.Hash()] = parentReceipts
   138  
   139  	for i, b := range blocks {
   140  		hashes[len(hashes)-i-2] = b.Hash()
   141  		headerm[b.Hash()] = b.Header()
   142  		blockm[b.Hash()] = b
   143  		receiptm[b.Hash()] = receipts[i]
   144  	}
   145  	return hashes, headerm, blockm, receiptm
   146  }
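
// An illustrative (unused) check of the ordering contract documented above:
// index 0 of the returned hash chain is the freshly mined head, the last
// index is the parent the chain was rooted at.
func exampleMakeChainOrdering(t *testing.T) {
	tester := newTester()
	defer tester.terminate()

	hashes, headers, _, _ := tester.makeChain(4, 0, tester.genesis, nil, false)
	if hashes[len(hashes)-1] != tester.genesis.Hash() {
		t.Fatalf("last hash should be the parent (genesis)")
	}
	if headers[hashes[0]].Number.Uint64() != 4 {
		t.Fatalf("first hash should be the head of the new chain")
	}
}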
   147  
   148  // makeChainFork creates two chains of length n, such that h1[:f] and
   149  // h2[:f] are different but have a common suffix of length n-f.
   150  func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
   151  	// Create the common suffix
   152  	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
   153  
   154  	// Create the forks, making the second heavier if non-balanced forks were requested
   155  	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
   156  	hashes1 = append(hashes1, hashes[1:]...)
   157  
   158  	heavy := false
   159  	if !balanced {
   160  		heavy = true
   161  	}
   162  	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
   163  	hashes2 = append(hashes2, hashes[1:]...)
   164  
   165  	for hash, header := range headers {
   166  		headers1[hash] = header
   167  		headers2[hash] = header
   168  	}
   169  	for hash, block := range blocks {
   170  		blocks1[hash] = block
   171  		blocks2[hash] = block
   172  	}
   173  	for hash, receipt := range receipts {
   174  		receipts1[hash] = receipt
   175  		receipts2[hash] = receipt
   176  	}
   177  	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
   178  }
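
// An illustrative (unused) sketch of the fork shape guaranteed above: the two
// returned hash chains diverge in their first f entries and share everything
// from index f onwards.
func exampleForkShape(t *testing.T) {
	tester := newTester()
	defer tester.terminate()

	n, f := 8, 3
	hashesA, hashesB, _, _, _, _, _, _ := tester.makeChainFork(n, f, tester.genesis, nil, true)
	if hashesA[0] == hashesB[0] {
		t.Fatalf("fork heads should differ")
	}
	for i := f; i < len(hashesA); i++ {
		if hashesA[i] != hashesB[i] {
			t.Fatalf("common suffix mismatch at index %d", i)
		}
	}
}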
   179  
   180  // terminate aborts any operations on the embedded downloader and releases all
   181  // held resources.
   182  func (dl *downloadTester) terminate() {
   183  	dl.downloader.Terminate()
   184  }
   185  
   186  // sync starts synchronizing with a remote peer, blocking until it completes.
   187  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
   188  	dl.lock.RLock()
   189  	hash := dl.peerHashes[id][0]
   190  	// If no particular TD was requested, load from the peer's blockchain
   191  	if td == nil {
   192  		td = big.NewInt(1)
   193  		if diff, ok := dl.peerChainTds[id][hash]; ok {
   194  			td = diff
   195  		}
   196  	}
   197  	dl.lock.RUnlock()
   198  
   199  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   200  	err := dl.downloader.synchronise(id, hash, td, mode)
   201  	select {
   202  	case <-dl.downloader.cancelCh:
   203  		// Ok, downloader fully cancelled after sync cycle
   204  	default:
   205  		// Downloader is still accepting packets, which can block up a peer
   206  		panic("downloader active post sync cycle") // panic will be caught by tester
   207  	}
   208  	return err
   209  }
   210  
   211  // HasHeader checks if a header is present in the tester's canonical chain.
   212  func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
   213  	return dl.GetHeaderByHash(hash) != nil
   214  }
   215  
   216  // HasBlock checks if a block is present in the tester's canonical chain.
   217  func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
   218  	return dl.GetBlockByHash(hash) != nil
   219  }
   220  
   221  // GetHeaderByHash retrieves a header from the tester's canonical chain.
   222  func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
   223  	dl.lock.RLock()
   224  	defer dl.lock.RUnlock()
   225  
   226  	return dl.ownHeaders[hash]
   227  }
   228  
   229  // GetBlockByHash retrieves a block from the tester's canonical chain.
   230  func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
   231  	dl.lock.RLock()
   232  	defer dl.lock.RUnlock()
   233  
   234  	return dl.ownBlocks[hash]
   235  }
   236  
   237  // CurrentHeader retrieves the current head header from the canonical chain.
   238  func (dl *downloadTester) CurrentHeader() *types.Header {
   239  	dl.lock.RLock()
   240  	defer dl.lock.RUnlock()
   241  
   242  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   243  		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
   244  			return header
   245  		}
   246  	}
   247  	return dl.genesis.Header()
   248  }
   249  
   250  // CurrentBlock retrieves the current head block from the canonical chain.
   251  func (dl *downloadTester) CurrentBlock() *types.Block {
   252  	dl.lock.RLock()
   253  	defer dl.lock.RUnlock()
   254  
   255  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   256  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   257  			if _, err := dl.stateDb.GlobalTable().Get(block.Root().Bytes()); err == nil {
   258  				return block
   259  			}
   260  		}
   261  	}
   262  	return dl.genesis
   263  }
   264  
   265  // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
   266  func (dl *downloadTester) CurrentFastBlock() *types.Block {
   267  	dl.lock.RLock()
   268  	defer dl.lock.RUnlock()
   269  
   270  	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
   271  		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
   272  			return block
   273  		}
   274  	}
   275  	return dl.genesis
   276  }
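
// Note the asymmetry with CurrentBlock above: the fast-sync head only needs
// the block itself to be present, whereas the full head additionally requires
// its state root to exist in stateDb, mirroring how fast sync imports block
// data ahead of state.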
   277  
   278  // FastSyncCommitHead manually sets the head block to a given hash.
   279  func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
   280  	// For now only check that the state trie is correct
   281  	if block := dl.GetBlockByHash(hash); block != nil {
   282  		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb.GlobalTable()), 0)
   283  		return err
   284  	}
   285  	return fmt.Errorf("non existent block: %x", hash[:4])
   286  }
   287  
   288  // GetTd retrieves the block's total difficulty from the canonical chain.
   289  func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
   290  	dl.lock.RLock()
   291  	defer dl.lock.RUnlock()
   292  
   293  	return dl.ownChainTd[hash]
   294  }
   295  
   296  // InsertHeaderChain injects a new batch of headers into the simulated chain.
   297  func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
   298  	dl.lock.Lock()
   299  	defer dl.lock.Unlock()
   300  
   301  	// Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
   302  	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
   303  		return 0, errors.New("unknown parent")
   304  	}
   305  	for i := 1; i < len(headers); i++ {
   306  		if headers[i].ParentHash != headers[i-1].Hash() {
   307  			return i, errors.New("unknown parent")
   308  		}
   309  	}
   310  	// Do a full insert if pre-checks passed
   311  	for i, header := range headers {
   312  		if _, ok := dl.ownHeaders[header.Hash()]; ok {
   313  			continue
   314  		}
   315  		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
   316  			return i, errors.New("unknown parent")
   317  		}
   318  		dl.ownHashes = append(dl.ownHashes, header.Hash())
   319  		dl.ownHeaders[header.Hash()] = header
   320  		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
   321  	}
   322  	return len(headers), nil
   323  }
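
// The parent pre-check above rejects detached batches wholesale. A minimal,
// unused sketch (assuming the usual go-ethereum header fields):
func exampleRejectDetachedHeaders(t *testing.T) {
	tester := newTester()
	defer tester.terminate()

	// An orphan header whose parent the tester has never seen
	orphan := &types.Header{
		ParentHash: common.Hash{0xde, 0xad},
		Number:     big.NewInt(1),
		Difficulty: big.NewInt(1),
	}
	if n, err := tester.InsertHeaderChain([]*types.Header{orphan}, 1); err == nil || n != 0 {
		t.Fatalf("detached header accepted: inserted %d, err %v", n, err)
	}
}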
   324  
   325  // InsertChain injects a new batch of blocks into the simulated chain.
   326  func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
   327  	dl.lock.Lock()
   328  	defer dl.lock.Unlock()
   329  
   330  	for i, block := range blocks {
   331  		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
   332  			return i, errors.New("unknown parent")
   333  		} else if _, err := dl.stateDb.GlobalTable().Get(parent.Root().Bytes()); err != nil {
   334  			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
   335  		}
   336  		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
   337  			dl.ownHashes = append(dl.ownHashes, block.Hash())
   338  			dl.ownHeaders[block.Hash()] = block.Header()
   339  		}
   340  		dl.ownBlocks[block.Hash()] = block
   341  		dl.stateDb.GlobalTable().Put(block.Root().Bytes(), []byte{0x00})
   342  		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
   343  	}
   344  	return len(blocks), nil
   345  }
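
// The parent-state lookup above is what gives the throttling tests teeth: a
// block is only importable once its parent's state root has been written to
// stateDb (done just below for the block itself), so imports necessarily
// proceed parent-first.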
   346  
   347  // InsertReceiptChain injects a new batch of receipts into the simulated chain.
   348  func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
   349  	dl.lock.Lock()
   350  	defer dl.lock.Unlock()
   351  
   352  	for i := 0; i < len(blocks) && i < len(receipts); i++ {
   353  		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
   354  			return i, errors.New("unknown owner")
   355  		}
   356  		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
   357  			return i, errors.New("unknown parent")
   358  		}
   359  		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
   360  		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
   361  	}
   362  	return len(blocks), nil
   363  }
   364  
   365  // Rollback removes some recently added elements from the chain.
   366  func (dl *downloadTester) Rollback(hashes []common.Hash) {
   367  	dl.lock.Lock()
   368  	defer dl.lock.Unlock()
   369  
   370  	for i := len(hashes) - 1; i >= 0; i-- {
   371  		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
   372  			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
   373  		}
   374  		delete(dl.ownChainTd, hashes[i])
   375  		delete(dl.ownHeaders, hashes[i])
   376  		delete(dl.ownReceipts, hashes[i])
   377  		delete(dl.ownBlocks, hashes[i])
   378  	}
   379  }
   380  
   381  // newPeer registers a new block download source into the downloader.
   382  func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   383  	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts)
   384  }
   385  
   386  // newSlowPeer registers a new block download source into the downloader. The
   387  // name is historical: this variant no longer takes an explicit delay, and any
   388  // latency is limited to the short pause applied before header delivery.
   389  func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
   390  	dl.lock.Lock()
   391  	defer dl.lock.Unlock()
   392  
   393  	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id})
   394  	if err == nil {
   395  		// Assign the owned hashes, headers and blocks to the peer (deep copy)
   396  		dl.peerHashes[id] = make([]common.Hash, len(hashes))
   397  		copy(dl.peerHashes[id], hashes)
   398  
   399  		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
   400  		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
   401  		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
   402  		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
   403  		dl.peerMissingStates[id] = make(map[common.Hash]bool)
   404  
   405  		genesis := hashes[len(hashes)-1]
   406  		if header := headers[genesis]; header != nil {
   407  			dl.peerHeaders[id][genesis] = header
   408  			dl.peerChainTds[id][genesis] = header.Difficulty
   409  		}
   410  		if block := blocks[genesis]; block != nil {
   411  			dl.peerBlocks[id][genesis] = block
   412  			dl.peerChainTds[id][genesis] = block.Difficulty()
   413  		}
   414  
   415  		for i := len(hashes) - 2; i >= 0; i-- {
   416  			hash := hashes[i]
   417  
   418  			if header, ok := headers[hash]; ok {
   419  				dl.peerHeaders[id][hash] = header
   420  				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
   421  					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
   422  				}
   423  			}
   424  			if block, ok := blocks[hash]; ok {
   425  				dl.peerBlocks[id][hash] = block
   426  				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
   427  					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
   428  				}
   429  			}
   430  			if receipt, ok := receipts[hash]; ok {
   431  				dl.peerReceipts[id][hash] = receipt
   432  			}
   433  		}
   434  	}
   435  	return err
   436  }
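
// The TD bookkeeping above walks the hash chain from the oldest entry (the
// genesis, stored last) towards the head (stored first), accumulating each
// block's difficulty onto its parent's total, so every peerChainTds entry
// holds the total difficulty of the chain up to and including that block.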
   437  
   438  // dropPeer simulates a hard peer removal from the connection pool.
   439  func (dl *downloadTester) dropPeer(id string) {
   440  	dl.lock.Lock()
   441  	defer dl.lock.Unlock()
   442  
   443  	delete(dl.peerHashes, id)
   444  	delete(dl.peerHeaders, id)
   445  	delete(dl.peerBlocks, id)
   446  	delete(dl.peerChainTds, id)
   447  
   448  	dl.downloader.UnregisterPeer(id)
   449  }
   450  
   451  type downloadTesterPeer struct {
   452  	dl   *downloadTester
   453  	id   string
   454  	lock sync.RWMutex
   455  }
   456  
   457  // Head retrieves the peer's current head hash and total difficulty. The
   458  // tester does not track a remote TD, so nil is returned for it.
   459  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   460  	dlp.dl.lock.RLock()
   461  	defer dlp.dl.lock.RUnlock()
   462  
   463  	return dlp.dl.peerHashes[dlp.id][0], nil
   464  }
   465  
   466  // RequestHeadersByHash retrieves a batch of headers from the associated test
   467  // peer, starting at a hashed origin. The hash is first resolved to a canonical
   468  // number, after which the numbered fetcher below satisfies the query.
   469  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
   470  	// Find the canonical number of the hash
   471  	dlp.dl.lock.RLock()
   472  	number := uint64(0)
   473  	for num, hash := range dlp.dl.peerHashes[dlp.id] {
   474  		if hash == origin {
   475  			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
   476  			break
   477  		}
   478  	}
   479  	dlp.dl.lock.RUnlock()
   480  
   481  	// Use the absolute header fetcher to satisfy the query
   482  	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
   483  }
   484  
   485  // RequestHeadersByNumber retrieves a batch of headers from the associated test
   486  // peer, starting at a numbered origin and honouring the requested amount and
   487  // skip distance. Delivery is performed asynchronously, as on a live network.
   488  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
   489  	dlp.dl.lock.RLock()
   490  	defer dlp.dl.lock.RUnlock()
   491  
   492  	// Gather the next batch of headers
   493  	hashes := dlp.dl.peerHashes[dlp.id]
   494  	headers := dlp.dl.peerHeaders[dlp.id]
   495  	result := make([]*types.Header, 0, amount)
   496  	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
   497  		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
   498  			result = append(result, header)
   499  		}
   500  	}
   501  	// Delay delivery a bit to allow attacks to unfold
   502  	go func() {
   503  		time.Sleep(time.Millisecond)
   504  		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
   505  	}()
   506  	return nil
   507  }
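
// The index arithmetic above, worked through on a toy chain: with 5 hashes
// stored head->genesis (block numbers 4,3,2,1,0), origin=1, amount=2 and
// skip=1 yield indices len-1-1-0*2 = 3 (block #1) and len-1-1-1*2 = 1
// (block #3), i.e. every other header climbing from the origin towards the
// head.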
   508  
   509  // RequestBodies retrieves a batch of block bodies from the associated test
   510  // peer, delivering the transactions of every requested block that the peer
   511  // actually knows about.
   512  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
   513  	dlp.dl.lock.RLock()
   514  	defer dlp.dl.lock.RUnlock()
   515  
   516  	blocks := dlp.dl.peerBlocks[dlp.id]
   517  
   518  	transactions := make([][]*types.Transaction, 0, len(hashes))
   519  
   520  	for _, hash := range hashes {
   521  		if block, ok := blocks[hash]; ok {
   522  			transactions = append(transactions, block.Transactions())
   523  		}
   524  	}
   525  	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions)
   526  
   527  	return nil
   528  }
   529  
   530  // RequestReceipts retrieves a batch of block receipts from the associated test
   531  // peer, delivering the receipts of every requested block that the peer
   532  // actually knows about.
   533  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
   534  	dlp.dl.lock.RLock()
   535  	defer dlp.dl.lock.RUnlock()
   536  
   537  	receipts := dlp.dl.peerReceipts[dlp.id]
   538  
   539  	results := make([][]*types.Receipt, 0, len(hashes))
   540  	for _, hash := range hashes {
   541  		if receipt, ok := receipts[hash]; ok {
   542  			results = append(results, receipt)
   543  		}
   544  	}
   545  	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
   546  
   547  	return nil
   548  }
   549  
   550  // RequestNodeData retrieves a batch of state trie nodes from the associated
   551  // test peer's database, honouring any entries explicitly flagged as missing
   552  // for that peer.
   553  func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
   554  	dlp.dl.lock.RLock()
   555  	defer dlp.dl.lock.RUnlock()
   556  
   557  	results := make([][]byte, 0, len(hashes))
   558  	for _, hash := range hashes {
   559  		if data, err := dlp.dl.peerDb.GlobalTable().Get(hash.Bytes()); err == nil {
   560  			if !dlp.dl.peerMissingStates[dlp.id][hash] {
   561  				results = append(results, data)
   562  			}
   563  		}
   564  	}
   565  	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
   566  
   567  	return nil
   568  }
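
// An unused sketch of how a test can exercise the peerMissingStates filter
// above: flagging a state root makes the peer withhold it, stalling fast
// sync on that entry until another peer supplies it.
func exampleWithholdState(dl *downloadTester, id string, root common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Assumes id was previously registered via newPeer/newSlowPeer
	dl.peerMissingStates[id][root] = true
}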
   569  
   570  // assertOwnChain checks if the local chain contains the correct number of items
   571  // of the various chain components.
   572  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   573  	assertOwnForkedChain(t, tester, 1, []int{length})
   574  }
   575  
   576  // assertOwnForkedChain checks if the local forked chain contains the correct
   577  // number of items of the various chain components.
   578  func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
   579  	// Initialize the counters for the first fork
   580  	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
   581  
   582  	if receipts < 0 {
   583  		receipts = 1
   584  	}
   585  	// Update the counters for each subsequent fork
   586  	for _, length := range lengths[1:] {
   587  		headers += length - common
   588  		blocks += length - common
   589  		receipts += length - common - fsMinFullBlocks
   590  	}
   591  	switch tester.downloader.mode {
   592  	case FullSync:
   593  		receipts = 1
   594  	case LightSync:
   595  		blocks, receipts = 1, 1
   596  	}
   597  	if hs := len(tester.ownHeaders); hs != headers {
   598  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   599  	}
   600  	if bs := len(tester.ownBlocks); bs != blocks {
   601  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   602  	}
   603  	if rs := len(tester.ownReceipts); rs != receipts {
   604  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   605  	}
   606  	// Verify the state trie too for fast syncs
   607  	/*if tester.downloader.mode == FastSync {
   608  		pivot := uint64(0)
   609  		var index int
   610  		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
   611  			index = pivot
   612  		} else {
   613  			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
   614  		}
   615  		if index > 0 {
   616  			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
   617  				t.Fatalf("state reconstruction failed: %v", err)
   618  			}
   619  		}
   620  	}*/
   621  }
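
// A worked example of the counters above: two forks of length 11 sharing 5
// common entries give headers = 11 + (11-5) = 17, and likewise for blocks.
// In FullSync mode the receipt count then collapses to the single genesis
// entry, and in LightSync mode the block count does too.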
   622  
   623  // Tests that simple synchronization against a canonical chain works correctly.
   624  // In this test common ancestor lookup should be short-circuited and not require
   625  // binary searching.
   626  func TestCanonicalSynchronisation62(t *testing.T)     { testCanonicalSynchronisation(t, 62, FullSync) }
   627  func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
   628  func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
   629  func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
   630  func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) }
   631  func TestCanonicalSynchronisation64Light(t *testing.T) {
   632  	testCanonicalSynchronisation(t, 64, LightSync)
   633  }
   634  
   635  func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   636  	t.Parallel()
   637  
   638  	tester := newTester()
   639  	defer tester.terminate()
   640  
   641  	// Create a small enough block chain to download
   642  	targetBlocks := blockCacheItems - 15
   643  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   644  
   645  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   646  
   647  	// Synchronise with the peer and make sure all relevant data was retrieved
   648  	if err := tester.sync("peer", nil, mode); err != nil {
   649  		t.Fatalf("failed to synchronise blocks: %v", err)
   650  	}
   651  	assertOwnChain(t, tester, targetBlocks+1)
   652  }
   653  
   654  // Tests that if a large batch of blocks is being downloaded, it is throttled
   655  // until the cached blocks are retrieved.
   656  func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
   657  func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
   658  func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
   659  func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
   660  func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
   661  
   662  func testThrottling(t *testing.T, protocol int, mode SyncMode) {
   663  	tester := newTester()
   664  	defer tester.terminate()
   665  
   666  	// Create a long block chain to download
   667  	targetBlocks := 8 * blockCacheItems
   668  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   669  
   670  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   671  
   672  	// Wrap the importer to allow stepping
   673  	blocked, proceed := uint32(0), make(chan struct{})
   674  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   675  		atomic.StoreUint32(&blocked, uint32(len(results)))
   676  		<-proceed
   677  	}
   678  	// Start a synchronisation concurrently
   679  	errc := make(chan error)
   680  	go func() {
   681  		errc <- tester.sync("peer", nil, mode)
   682  	}()
   683  	// Iteratively take some blocks, always checking the retrieval count
   684  	for {
   685  		// Check the retrieval count synchronously (! reason for this ugly block)
   686  		tester.lock.RLock()
   687  		retrieved := len(tester.ownBlocks)
   688  		tester.lock.RUnlock()
   689  		if retrieved >= targetBlocks+1 {
   690  			break
   691  		}
   692  		// Wait a bit for sync to throttle itself
   693  		var cached, frozen int
   694  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   695  			time.Sleep(25 * time.Millisecond)
   696  
   697  			tester.lock.Lock()
   698  			tester.downloader.queue.lock.Lock()
   699  			cached = len(tester.downloader.queue.blockDonePool)
   700  			if mode == FastSync {
   701  				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
   702  					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
   703  					cached = receipts
   704  					//}
   705  				}
   706  			}
   707  			frozen = int(atomic.LoadUint32(&blocked))
   708  			retrieved = len(tester.ownBlocks)
   709  			tester.downloader.queue.lock.Unlock()
   710  			tester.lock.Unlock()
   711  
   712  			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   713  				break
   714  			}
   715  		}
   716  		// Make sure we filled up the cache, then exhaust it
   717  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   718  
   719  		tester.lock.RLock()
   720  		retrieved = len(tester.ownBlocks)
   721  		tester.lock.RUnlock()
   722  		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   723  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
   724  		}
   725  		// Permit the blocked blocks to import
   726  		if atomic.LoadUint32(&blocked) > 0 {
   727  			atomic.StoreUint32(&blocked, uint32(0))
   728  			proceed <- struct{}{}
   729  		}
   730  	}
   731  	// Check that we haven't pulled more blocks than available
   732  	assertOwnChain(t, tester, targetBlocks+1)
   733  	if err := <-errc; err != nil {
   734  		t.Fatalf("block synchronization failed: %v", err)
   735  	}
   736  }
   737  
   738  // Tests that simple synchronization against a forked chain works correctly. In
   739  // this test common ancestor lookup should *not* be short-circuited, and a full
   740  // binary search should be executed.
   741  func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
   742  func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
   743  func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
   744  func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
   745  func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
   746  func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
   747  
   748  func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
   749  	t.Parallel()
   750  
   751  	tester := newTester()
   752  	defer tester.terminate()
   753  
   754  	// Create a long enough forked chain
   755  	common, fork := MaxHashFetch, 2*MaxHashFetch
   756  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   757  
   758  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
   759  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
   760  
   761  	// Synchronise with the peer and make sure all blocks were retrieved
   762  	if err := tester.sync("fork A", nil, mode); err != nil {
   763  		t.Fatalf("failed to synchronise blocks: %v", err)
   764  	}
   765  	assertOwnChain(t, tester, common+fork+1)
   766  
   767  	// Synchronise with the second peer and make sure that fork is pulled too
   768  	if err := tester.sync("fork B", nil, mode); err != nil {
   769  		t.Fatalf("failed to synchronise blocks: %v", err)
   770  	}
   771  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
   772  }
   773  
   774  // Tests that synchronising against a much shorter but much heavier fork works
   775  // correctly and is not dropped.
   776  func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
   777  func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
   778  func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
   779  func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
   780  func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
   781  func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
   782  
   783  func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   784  	t.Parallel()
   785  
   786  	tester := newTester()
   787  	defer tester.terminate()
   788  
   789  	// Create a long enough forked chain
   790  	common, fork := MaxHashFetch, 4*MaxHashFetch
   791  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   792  
   793  	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
   794  	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
   795  
   796  	// Synchronise with the peer and make sure all blocks were retrieved
   797  	if err := tester.sync("light", nil, mode); err != nil {
   798  		t.Fatalf("failed to synchronise blocks: %v", err)
   799  	}
   800  	assertOwnChain(t, tester, common+fork+1)
   801  
   802  	// Synchronise with the second peer and make sure that fork is pulled too
   803  	if err := tester.sync("heavy", nil, mode); err != nil {
   804  		t.Fatalf("failed to synchronise blocks: %v", err)
   805  	}
   806  	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
   807  }
   808  
   809  // Tests that chain forks are contained within a certain interval of the current
   810  // chain head, ensuring that malicious peers cannot waste resources by feeding
   811  // long dead chains.
   812  func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
   813  func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
   814  func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
   815  func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
   816  func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
   817  func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
   818  
   819  func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
   820  	tester := newTester()
   821  	defer tester.terminate()
   822  
   823  	// Create a long enough forked chain
   824  	common, fork := 13, int(MaxForkAncestry+17)
   825  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
   826  
   827  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   828  	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
   829  
   830  	// Synchronise with the peer and make sure all blocks were retrieved
   831  	if err := tester.sync("original", nil, mode); err != nil {
   832  		t.Fatalf("failed to synchronise blocks: %v", err)
   833  	}
   834  	assertOwnChain(t, tester, common+fork+1)
   835  
   836  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   837  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   838  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   839  	}
   840  }
   841  
   842  // Tests that chain forks are contained within a certain interval of the current
   843  // chain head for short but heavy forks too. These are a bit special because they
   844  // take different ancestor lookup paths.
   845  func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
   846  func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
   847  func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
   848  func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
   849  func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
   850  func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
   851  
   852  func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
   853  	t.Parallel()
   854  
   855  	tester := newTester()
   856  	defer tester.terminate()
   857  
   858  	// Create a long enough forked chain
   859  	common, fork := 13, int(MaxForkAncestry+17)
   860  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
   861  
   862  	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
   863  	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
   864  
   865  	// Synchronise with the peer and make sure all blocks were retrieved
   866  	if err := tester.sync("original", nil, mode); err != nil {
   867  		t.Fatalf("failed to synchronise blocks: %v", err)
   868  	}
   869  	assertOwnChain(t, tester, common+fork+1)
   870  
   871  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   872  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   873  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   874  	}
   875  }
   876  
   877  // Tests that an inactive downloader will not accept incoming block headers and
   878  // bodies.
   879  func TestInactiveDownloader62(t *testing.T) {
   880  	t.Parallel()
   881  
   882  	tester := newTester()
   883  	defer tester.terminate()
   884  
   885  	// Check that neither block headers nor bodies are accepted
   886  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   887  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   888  	}
   889  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
   890  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   891  	}
   892  }
   893  
   894  // Tests that an inactive downloader will not accept incoming block headers,
   895  // bodies and receipts.
   896  func TestInactiveDownloader63(t *testing.T) {
   897  	t.Parallel()
   898  
   899  	tester := newTester()
   900  	defer tester.terminate()
   901  
   902  	// Check that block headers, bodies and receipts are all rejected
   903  	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
   904  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   905  	}
   906  	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}); err != errNoSyncActive {
   907  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   908  	}
   909  	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
   910  		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
   911  	}
   912  }
   913  
   914  // Tests that a canceled download wipes all previously accumulated state.
   915  func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
   916  func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
   917  func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
   918  func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
   919  func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
   920  func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
   921  
   922  func testCancel(t *testing.T, protocol int, mode SyncMode) {
   923  	t.Parallel()
   924  
   925  	tester := newTester()
   926  	defer tester.terminate()
   927  
   928  	// Create a small enough block chain to download
   929  	targetBlocks := blockCacheItems - 15
   930  	if targetBlocks >= MaxHashFetch {
   931  		targetBlocks = MaxHashFetch - 15
   932  	}
   933  	if targetBlocks >= MaxHeaderFetch {
   934  		targetBlocks = MaxHeaderFetch - 15
   935  	}
   936  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   937  
   938  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
   939  
   940  	// Make sure canceling works with a pristine downloader
   941  	tester.downloader.Cancel()
   942  	if !tester.downloader.queue.Idle() {
   943  		t.Errorf("download queue not idle")
   944  	}
   945  	// Synchronise with the peer, but cancel afterwards
   946  	if err := tester.sync("peer", nil, mode); err != nil {
   947  		t.Fatalf("failed to synchronise blocks: %v", err)
   948  	}
   949  	tester.downloader.Cancel()
   950  	if !tester.downloader.queue.Idle() {
   951  		t.Errorf("download queue not idle")
   952  	}
   953  }
   954  
   955  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   956  func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
   957  func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
   958  func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
   959  func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
   960  func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
   961  func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
   962  
   963  func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
   964  	tester := newTester()
   965  	defer tester.terminate()
   966  
   967  	// Create various peers with various parts of the chain
   968  	targetPeers := 8
   969  	targetBlocks := targetPeers*blockCacheItems - 15
   970  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
   971  
   972  	for i := 0; i < targetPeers; i++ {
   973  		id := fmt.Sprintf("peer #%d", i)
   974  		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
   975  	}
   976  	if err := tester.sync("peer #0", nil, mode); err != nil {
   977  		t.Fatalf("failed to synchronise blocks: %v", err)
   978  	}
   979  	assertOwnChain(t, tester, targetBlocks+1)
   980  }
   981  
   982  // Tests that synchronisations behave well in multi-version protocol environments
   983  // and do not wreak havoc on other nodes in the network.
   984  func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
   985  func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
   986  func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
   987  func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
   988  func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
   989  func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
   990  
   991  func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
   992  	t.Parallel()
   993  
   994  	tester := newTester()
   995  	defer tester.terminate()
   996  
   997  	// Create a small enough block chain to download
   998  	targetBlocks := blockCacheItems - 15
   999  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1000  
  1001  	// Create peers of every type
  1002  	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
  1003  	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
  1004  	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
  1005  
  1006  	// Synchronise with the requested peer and make sure all blocks were retrieved
  1007  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
  1008  		t.Fatalf("failed to synchronise blocks: %v", err)
  1009  	}
  1010  	assertOwnChain(t, tester, targetBlocks+1)
  1011  
  1012  	// Check that no peers have been dropped off
  1013  	for _, version := range []int{62, 63, 64} {
  1014  		peer := fmt.Sprintf("peer %d", version)
  1015  		if _, ok := tester.peerHashes[peer]; !ok {
  1016  			t.Errorf("%s dropped", peer)
  1017  		}
  1018  	}
  1019  }
  1020  
  1021  // Tests that if a block is empty (e.g. header only), no body request should be
  1022  // made, and instead the header should be assembled into a whole block by itself.
  1023  func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
  1024  func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
  1025  func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
  1026  func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
  1027  func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
  1028  func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
  1029  
  1030  func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
  1031  	t.Parallel()
  1032  
  1033  	tester := newTester()
  1034  	defer tester.terminate()
  1035  
  1036  	// Create a block chain to download
  1037  	targetBlocks := 2*blockCacheItems - 15
  1038  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1039  
  1040  	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1041  
  1042  	// Instrument the downloader to count body and receipt requests
  1043  	bodiesHave, receiptsHave := int32(0), int32(0)
  1044  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
  1045  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
  1046  	}
  1047  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
  1048  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
  1049  	}
  1050  	// Synchronise with the peer and make sure all blocks were retrieved
  1051  	if err := tester.sync("peer", nil, mode); err != nil {
  1052  		t.Fatalf("failed to synchronise blocks: %v", err)
  1053  	}
  1054  	assertOwnChain(t, tester, targetBlocks+1)
  1055  
  1056  	// Validate the number of block bodies and receipts that should have been requested
  1057  	bodiesNeeded, receiptsNeeded := 0, 0
  1058  	for _, block := range blocks {
  1059  		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
  1060  			bodiesNeeded++
  1061  		}
  1062  	}
  1063  	for _, receipt := range receipts {
  1064  		if mode == FastSync && len(receipt) > 0 {
  1065  			receiptsNeeded++
  1066  		}
  1067  	}
  1068  	if int(bodiesHave) != bodiesNeeded {
  1069  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
  1070  	}
  1071  	if int(receiptsHave) != receiptsNeeded {
  1072  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
  1073  	}
  1074  }
  1075  
  1076  // Tests that headers are enqueued continuously, preventing malicious nodes from
  1077  // stalling the downloader by feeding gapped header chains.
  1078  func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
  1079  func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
  1080  func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
  1081  func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
  1082  func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
  1083  func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
  1084  
  1085  func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1086  	t.Parallel()
  1087  
  1088  	tester := newTester()
  1089  	defer tester.terminate()
  1090  
  1091  	// Create a small enough block chain to download
  1092  	targetBlocks := blockCacheItems - 15
  1093  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1094  
  1095  	// Attempt a full sync with an attacker feeding gapped headers
  1096  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1097  	missing := targetBlocks / 2
  1098  	delete(tester.peerHeaders["attack"], hashes[missing])
  1099  
  1100  	if err := tester.sync("attack", nil, mode); err == nil {
  1101  		t.Fatalf("succeeded attacker synchronisation")
  1102  	}
  1103  	// Synchronise with the valid peer and make sure sync succeeds
  1104  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1105  	if err := tester.sync("valid", nil, mode); err != nil {
  1106  		t.Fatalf("failed to synchronise blocks: %v", err)
  1107  	}
  1108  	assertOwnChain(t, tester, targetBlocks+1)
  1109  }
  1110  
  1111  // Tests that if requested headers are shifted (i.e. first is missing), the queue
  1112  // detects the invalid numbering.
  1113  func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
  1114  func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
  1115  func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
  1116  func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
  1117  func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
  1118  func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
  1119  
  1120  func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
  1121  	t.Parallel()
  1122  
  1123  	tester := newTester()
  1124  	defer tester.terminate()
  1125  
  1126  	// Create a small enough block chain to download
  1127  	targetBlocks := blockCacheItems - 15
  1128  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1129  
  1130  	// Attempt a full sync with an attacker feeding shifted headers
  1131  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1132  	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
  1133  	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
  1134  	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
  1135  
  1136  	if err := tester.sync("attack", nil, mode); err == nil {
  1137  		t.Fatalf("succeeded attacker synchronisation")
  1138  	}
  1139  	// Synchronise with the valid peer and make sure sync succeeds
  1140  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1141  	if err := tester.sync("valid", nil, mode); err != nil {
  1142  		t.Fatalf("failed to synchronise blocks: %v", err)
  1143  	}
  1144  	assertOwnChain(t, tester, targetBlocks+1)
  1145  }
  1146  
  1147  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1148  // for various failure scenarios. Afterwards a full sync is attempted to make
  1149  // sure no state was corrupted.
  1150  func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
  1151  func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
  1152  func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
  1153  
  1154  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1155  	tester := newTester()
  1156  	defer tester.terminate()
  1157  
  1158  	// Create a small enough block chain to download
  1159  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1160  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1161  
  1162  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1163  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1164  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1165  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1166  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1167  
  1168  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1169  		t.Fatalf("succeeded fast attacker synchronisation")
  1170  	}
  1171  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1172  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1173  	}
  1174  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1175  	// This should result in both the last fsHeaderSafetyNet headers being rolled
  1176  	// back and the pivot point being reverted to a non-block status.
  1177  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1178  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1179  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1180  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1181  
  1182  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1183  		t.Fatalf("succeeded block attacker synchronisation")
  1184  	}
  1185  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1186  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1187  	}
  1188  	if mode == FastSync {
  1189  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1190  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1191  		}
  1192  	}
  1193  	// Attempt to sync with an attacker that withholds promised blocks after the
  1194  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1195  	// but already imported pivot block.
  1196  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1197  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1198  
  1199  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1200  		for i := missing; i <= len(hashes); i++ {
  1201  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1202  		}
  1203  		tester.downloader.syncInitHook = nil
  1204  	}
  1205  
  1206  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1207  		t.Fatalf("succeeded withholding attacker synchronisation")
  1208  	}
  1209  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1210  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1211  	}
  1212  	if mode == FastSync {
  1213  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1214  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1215  		}
  1216  	}
  1217  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1218  	// rollback should also disable fast syncing for this process, verify that we
  1219  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1220  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1221  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1222  	if err := tester.sync("valid", nil, mode); err != nil {
  1223  		t.Fatalf("failed to synchronise blocks: %v", err)
  1224  	}
  1225  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1226  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1227  	}
  1228  	if mode != LightSync {
  1229  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1230  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1231  		}
  1232  	}
  1233  }
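
        // Rollback arithmetic behind the bounds asserted above: the withheld header
        // sits at height 3*fsHeaderSafetyNet+MaxHeaderFetch, so header import can
        // proceed at most that far before the gap is hit. Unwinding the last
        // fsHeaderSafetyNet headers then leaves the head no higher than
        // 2*fsHeaderSafetyNet+MaxHeaderFetch, which is exactly the limit checked
        // after both the block-attack and withhold-attack phases.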
  1234  
  1235  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1236  // afterwards by not sending any useful hashes.
  1237  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1238  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1239  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1240  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1241  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1242  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1243  
  1244  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1245  	t.Parallel()
  1246  
  1247  	tester := newTester()
  1248  	defer tester.terminate()
  1249  
  1250  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1251  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1252  
  1253  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1254  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1255  	}
  1256  }
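
        // Note on the setup above: the attacker is registered with a single-element
        // hash chain (just its head hash), while the sync call advertises a total
        // difficulty of 1,000,000. The peer thus promises work it can never deliver,
        // and the expected outcome is errStallingPeer rather than an endless loop of
        // header requests.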
  1257  
  1258  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1259  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1260  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1261  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1262  
  1263  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1264  	t.Parallel()
  1265  
  1266  	// Define the disconnection requirement for individual hash fetch errors
  1267  	tests := []struct {
  1268  		result error
  1269  		drop   bool
  1270  	}{
  1271  		{nil, false},                        // Sync succeeded, all is well
  1272  		{errBusy, false},                    // Sync is already in progress, no problem
  1273  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1274  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1275  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1276  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1277  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1278  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1279  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1280  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1281  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1282  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1283  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1284  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1285  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1286  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1287  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1288  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1289  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1290  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1291  	}
  1292  	// Run the tests and check disconnection status
  1293  	tester := newTester()
  1294  	defer tester.terminate()
  1295  
  1296  	for i, tt := range tests {
  1297  		// Register a new peer and ensure its presence
  1298  		id := fmt.Sprintf("test %d", i)
  1299  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1300  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1301  		}
  1302  		if _, ok := tester.peerHashes[id]; !ok {
  1303  			t.Fatalf("test %d: registered peer not found", i)
  1304  		}
  1305  		// Simulate a synchronisation and check the required result
  1306  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1307  
  1308  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1309  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1310  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1311  		}
  1312  	}
  1313  }
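
        // The drop policy encoded in the table above, condensed into a predicate
        // purely for readability (a hypothetical helper, not part of the downloader
        // API; the real decision is made inline by the Synchronise error handling):
        func shouldDropPeer(err error) bool {
        	switch err {
        	case errBadPeer, errStallingPeer, errTimeout, errEmptyHeaderSet,
        		errPeersUnavailable, errInvalidAncestor, errInvalidChain:
        		return true
        	default:
        		// nil, errBusy, errUnknownPeer, errNoPeers, the errInvalid* content
        		// errors and all the errCancel* variants leave the peer connected.
        		return false
        	}
        }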
  1314  
  1315  // Tests that synchronisation progress (origin block number, current block number
  1316  // and highest block number) is tracked and updated correctly.
  1317  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1318  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1319  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1320  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1321  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1322  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1323  
  1324  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1325  	t.Parallel()
  1326  
  1327  	tester := newTester()
  1328  	defer tester.terminate()
  1329  
  1330  	// Create a small enough block chain to download
  1331  	targetBlocks := blockCacheItems - 15
  1332  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1333  
  1334  	// Set a sync init hook to catch progress changes
  1335  	starting := make(chan struct{})
  1336  	progress := make(chan struct{})
  1337  
  1338  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1339  		starting <- struct{}{}
  1340  		<-progress
  1341  	}
  1342  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1343  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1344  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1345  	}
  1346  	// Synchronise half the blocks and check initial progress
  1347  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1348  	pending := new(sync.WaitGroup)
  1349  	pending.Add(1)
  1350  
  1351  	go func() {
  1352  		defer pending.Done()
  1353  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1354  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1355  		}
  1356  	}()
  1357  	<-starting
  1358  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1359  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1360  	}
  1361  	progress <- struct{}{}
  1362  	pending.Wait()
  1363  
  1364  	// Synchronise all the blocks and check continuation progress
  1365  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1366  	pending.Add(1)
  1367  
  1368  	go func() {
  1369  		defer pending.Done()
  1370  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1371  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1372  		}
  1373  	}()
  1374  	<-starting
  1375  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1376  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1377  	}
  1378  	progress <- struct{}{}
  1379  	pending.Wait()
  1380  
  1381  	// Check final progress after successful sync
  1382  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1383  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1384  	}
  1385  }
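
        // The starting/progress channel pair used above forms a two-phase handshake:
        // the hook parks the sync goroutine right after initialisation, the test
        // inspects Progress() at that known point, and then unblocks it. A minimal
        // sketch of the same pattern as a standalone helper (hypothetical; the tests
        // in this file inline it instead):
        func pauseOnInit(d *Downloader) (started <-chan struct{}, resume chan<- struct{}) {
        	starting := make(chan struct{})
        	progress := make(chan struct{})
        	d.syncInitHook = func(origin, latest uint64) {
        		starting <- struct{}{} // signal that initialisation has completed
        		<-progress             // block until the test has checked Progress()
        	}
        	return starting, progress
        }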
  1386  
  1387  // Tests that synchronisation progress (origin block number and highest block
  1388  // number) is tracked and updated correctly in case of a fork (or manual head
  1389  // reversion).
  1390  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1391  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1392  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1393  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1394  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1395  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1396  
  1397  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1398  	t.Parallel()
  1399  
  1400  	tester := newTester()
  1401  	defer tester.terminate()
  1402  
  1403  	// Create a forked chain to simulate an origin reversion
  1404  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1405  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1406  
  1407  	// Set a sync init hook to catch progress changes
  1408  	starting := make(chan struct{})
  1409  	progress := make(chan struct{})
  1410  
  1411  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1412  		starting <- struct{}{}
  1413  		<-progress
  1414  	}
  1415  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1416  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1417  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1418  	}
  1419  	// Synchronise with one of the forks and check progress
  1420  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1421  	pending := new(sync.WaitGroup)
  1422  	pending.Add(1)
  1423  
  1424  	go func() {
  1425  		defer pending.Done()
  1426  		if err := tester.sync("fork A", nil, mode); err != nil {
  1427  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1428  		}
  1429  	}()
  1430  	<-starting
  1431  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1432  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1433  	}
  1434  	progress <- struct{}{}
  1435  	pending.Wait()
  1436  
  1437  	// Simulate a successful sync above the fork
  1438  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1439  
  1440  	// Synchronise with the second fork and check progress resets
  1441  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1442  	pending.Add(1)
  1443  
  1444  	go func() {
  1445  		defer pending.Done()
  1446  		if err := tester.sync("fork B", nil, mode); err != nil {
  1447  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1448  		}
  1449  	}()
  1450  	<-starting
  1451  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1452  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1453  	}
  1454  	progress <- struct{}{}
  1455  	pending.Wait()
  1456  
  1457  	// Check final progress after successful sync
  1458  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1459  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1460  	}
  1461  }
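
        // Note on the "Forking progress" assertion above: when switching from fork A
        // to fork B, the downloader's ancestor lookup lands on the last shared block,
        // so StartingBlock resets to `common` even though the previous cycle already
        // synced up to len(hashesA)-1. That reset is the head-reversion behaviour
        // this test documents.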
  1462  
  1463  // Tests that if synchronisation is aborted due to some failure, then the progress
  1464  // origin is not updated in the next sync cycle, as it should be considered the
  1465  // continuation of the previous sync and not a new instance.
  1466  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1467  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1468  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1469  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1470  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1471  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1472  
  1473  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1474  	t.Parallel()
  1475  
  1476  	tester := newTester()
  1477  	defer tester.terminate()
  1478  
  1479  	// Create a small enough block chain to download
  1480  	targetBlocks := blockCacheItems - 15
  1481  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1482  
  1483  	// Set a sync init hook to catch progress changes
  1484  	starting := make(chan struct{})
  1485  	progress := make(chan struct{})
  1486  
  1487  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1488  		starting <- struct{}{}
  1489  		<-progress
  1490  	}
  1491  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1492  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1493  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1494  	}
  1495  	// Attempt a full sync with a faulty peer
  1496  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1497  	missing := targetBlocks / 2
  1498  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1499  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1500  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1501  
  1502  	pending := new(sync.WaitGroup)
  1503  	pending.Add(1)
  1504  
  1505  	go func() {
  1506  		defer pending.Done()
  1507  		if err := tester.sync("faulty", nil, mode); err == nil {
  1508  			panic("succeeded faulty synchronisation")
  1509  		}
  1510  	}()
  1511  	<-starting
  1512  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1513  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1514  	}
  1515  	progress <- struct{}{}
  1516  	pending.Wait()
  1517  
  1518  	// Synchronise with a good peer and check that the progress origin remains the same after the failure
  1519  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1520  	pending.Add(1)
  1521  
  1522  	go func() {
  1523  		defer pending.Done()
  1524  		if err := tester.sync("valid", nil, mode); err != nil {
  1525  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1526  		}
  1527  	}()
  1528  	<-starting
  1529  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1530  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1531  	}
  1532  	progress <- struct{}{}
  1533  	pending.Wait()
  1534  
  1535  	// Check final progress after successful sync
  1536  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1537  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1538  	}
  1539  }
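
        // Note on the assertions above: the faulty sync began at the genesis and
        // failed around block targetBlocks/2, so the retry against the valid peer is
        // treated as a continuation of that attempt. StartingBlock must therefore
        // stay at or below targetBlocks/2 instead of jumping to the height already
        // reached before the failure.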
  1540  
  1541  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1542  // the progress height is successfully reduced at the next sync invocation.
  1543  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1544  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1545  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1546  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1547  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1548  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1549  
  1550  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1551  	t.Parallel()
  1552  
  1553  	tester := newTester()
  1554  	defer tester.terminate()
  1555  
  1556  	// Create a small block chain
  1557  	targetBlocks := blockCacheItems - 15
  1558  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1559  
  1560  	// Set a sync init hook to catch progress changes
  1561  	starting := make(chan struct{})
  1562  	progress := make(chan struct{})
  1563  
  1564  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1565  		starting <- struct{}{}
  1566  		<-progress
  1567  	}
  1568  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1569  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1570  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1571  	}
  1572  	// Create and sync with an attacker that promises a higher chain than available
  1573  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1574  	for i := 1; i < 3; i++ {
  1575  		delete(tester.peerHeaders["attack"], hashes[i])
  1576  		delete(tester.peerBlocks["attack"], hashes[i])
  1577  		delete(tester.peerReceipts["attack"], hashes[i])
  1578  	}
  1579  
  1580  	pending := new(sync.WaitGroup)
  1581  	pending.Add(1)
  1582  
  1583  	go func() {
  1584  		defer pending.Done()
  1585  		if err := tester.sync("attack", nil, mode); err == nil {
  1586  			panic("succeeded attacker synchronisation")
  1587  		}
  1588  	}()
  1589  	<-starting
  1590  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1591  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1592  	}
  1593  	progress <- struct{}{}
  1594  	pending.Wait()
  1595  
  1596  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1597  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1598  	pending.Add(1)
  1599  
  1600  	go func() {
  1601  		defer pending.Done()
  1602  		if err := tester.sync("valid", nil, mode); err != nil {
  1603  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1604  		}
  1605  	}()
  1606  	<-starting
  1607  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1608  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1609  	}
  1610  	progress <- struct{}{}
  1611  	pending.Wait()
  1612  
  1613  	// Check final progress after successful sync
  1614  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1615  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1616  	}
  1617  }
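
        // Height arithmetic above: the attacker serves a chain of targetBlocks+3
        // blocks but withholds the two headers just below its advertised head
        // (hashes[1] and hashes[2]), so its sync must fail. The honest peer is then
        // registered with hashes[3:], a head exactly three blocks lower, and the
        // reported HighestBlock has to drop from the faked targetBlocks+3 back to
        // the true targetBlocks.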
  1618  
  1619  // This test reproduces an issue where unexpected deliveries would
  1620  // block indefinitely if they arrived at the right time.
  1621  // Data-driven subtests are used so that the test runs in parallel with itself,
  1622  // but not with the other tests, avoiding intermittent failures.
  1623  func TestDeliverHeadersHang(t *testing.T) {
  1624  	testCases := []struct {
  1625  		protocol int
  1626  		syncMode SyncMode
  1627  	}{
  1628  		{62, FullSync},
  1629  		{63, FullSync},
  1630  		{63, FastSync},
  1631  		{64, FullSync},
  1632  		{64, FastSync},
  1633  		{64, LightSync},
  1634  	}
  1635  	for _, tc := range testCases {
  1636  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1637  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1638  		})
  1639  	}
  1640  }
  1641  
  1642  type floodingTestPeer struct {
  1643  	peer   Peer
  1644  	tester *downloadTester
  1645  	pend   sync.WaitGroup
  1646  }
  1647  
  1648  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1649  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1650  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1651  }
  1652  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1653  	return ftp.peer.RequestBodies(hashes)
  1654  }
  1655  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1656  	return ftp.peer.RequestReceipts(hashes)
  1657  }
  1658  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1659  	return ftp.peer.RequestNodeData(hashes)
  1660  }
  1661  
  1662  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1663  	const size = 500
  1664  	deliveriesDone := make(chan struct{}, size)
  1665  	ftp.pend.Add(size)
  1666  	for i := 0; i < size; i++ {
  1667  		peer := fmt.Sprintf("fake-peer%d", i)
  1668  		go func() {
  1669  			defer ftp.pend.Done()
  1670  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1671  			deliveriesDone <- struct{}{}
  1672  		}()
  1673  	}
  1674  	// Deliver the actual requested headers.
  1675  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1676  	// None of the extra deliveries should block.
  1677  	timeout := time.After(60 * time.Second)
  1678  	for i := 0; i < size; i++ {
  1679  		select {
  1680  		case <-deliveriesDone:
  1681  		case <-timeout:
  1682  			panic("blocked")
  1683  		}
  1684  	}
  1685  	return nil
  1686  }
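
        // floodingTestPeer wraps a real peer (decorator style) so that every header
        // request is accompanied by 500 concurrent unrequested deliveries from fake
        // peer IDs. Each of those deliveries is expected to be rejected promptly
        // instead of blocking on the downloader's internal channels; the 60 second
        // timeout turns any regression back into a hard, observable failure.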
  1687  
  1688  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1689  	t.Parallel()
  1690  
  1691  	master := newTester()
  1692  	defer master.terminate()
  1693  
  1694  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1695  	for i := 0; i < 200; i++ {
  1696  		tester := newTester()
  1697  		tester.peerDb = master.peerDb
  1698  
  1699  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1700  		// Whenever the downloader requests headers, flood it with
  1701  		// a lot of unrequested header deliveries.
  1702  		testPeer := &floodingTestPeer{
  1703  			peer:   tester.downloader.peers.peers["peer"].peer,
  1704  			tester: tester,
  1705  		}
  1706  		tester.downloader.peers.peers["peer"].peer = testPeer
  1707  		if err := tester.sync("peer", nil, mode); err != nil {
  1708  			t.Errorf("test %d: sync failed: %v", i, err)
  1709  		}
  1710  		tester.terminate()
  1711  
  1712  		// Flush all goroutines to prevent messing with subsequent tests
  1713  		testPeer.pend.Wait()
  1714  	}
  1715  }