github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/qct/downloader/downloader_test.go

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/quickchainproject/quickchain/common"
	"github.com/quickchainproject/quickchain/consensus/qcthash"
	"github.com/quickchainproject/quickchain/core"
	"github.com/quickchainproject/quickchain/core/types"
	"github.com/quickchainproject/quickchain/crypto"
	"github.com/quickchainproject/quickchain/event"
	"github.com/quickchainproject/quickchain/params"
	"github.com/quickchainproject/quickchain/qctdb"
	"github.com/quickchainproject/quickchain/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out a local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb qctdb.Database // Database used by the tester for syncing from peers
	peerDb  qctdb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb, _ := qctdb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb, _ = qctdb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)

	return tester
}

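// Typical lifecycle of the helpers in this file (an illustrative sketch, not
// one of the tests below; the chain length is arbitrary):
//
//	tester := newTester()
//	defer tester.terminate()
//	hashes, headers, blocks, receipts := tester.makeChain(128, 0, tester.genesis, nil, false)
//	tester.newPeer("example", 64, hashes, headers, blocks, receipts)
//	if err := tester.sync("example", nil, FullSync); err != nil {
//		// handle the failure
//	}
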
// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, qcthash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(types.Binary, block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash:  block.PrevBlock(i - 1).Hash(),
				Number:      big.NewInt(block.Number().Int64() - 1),
				DposContext: &types.DposContextProto{},
			})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
	}
	return hashes, headerm, blockm, receiptm
}

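// Ordering sketch: for makeChain(3, ...) on parent P producing children A, B
// and C, the returned hash slice is [C, B, A, P] (head first). The peer-side
// index arithmetic further down relies on exactly this layout.
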
// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if non-balanced forks were requested
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := !balanced
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}

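// Fork geometry sketch: for makeChainFork(n, f, ...), each returned hash chain
// has n+1 entries ordered head -> genesis. The first f entries are the
// diverging fork blocks (seeds 1 and 2 respectively), while the trailing
// n-f+1 entries are the shared suffix built from seed 0 down to the parent.
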
// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

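// Total difficulty bookkeeping sketch: both insert paths extend the TD map as
// ownChainTd[child] = ownChainTd[parent] + child.Difficulty. With, say, a
// genesis TD of 1 and a per-block difficulty of 2, a three-block chain would
// carry TDs 1, 3, 5, 7 (illustrative numbers only).
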
// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	err := dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
			dl.peerChainTds[id][genesis] = header.Difficulty
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
			dl.peerChainTds[id][genesis] = block.Difficulty()
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
		}
	}
	return err
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

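// Note that dropPeer purges the hash, header, block and TD state registered
// for the peer, but leaves peerReceipts and peerMissingStates behind; the
// tests in this file never rely on those two being cleared (an observation
// about this tester, not a documented guarantee).
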
type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	delay time.Duration
	lock  sync.RWMutex
}

// setDelay is a thread safe setter for the network delay value.
func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
	dlp.lock.Lock()
	defer dlp.lock.Unlock()

	dlp.delay = delay
}

// waitDelay is a thread safe way to sleep for the configured time.
func (dlp *downloadTesterPeer) waitDelay() {
	dlp.lock.RLock()
	delay := dlp.delay
	dlp.lock.RUnlock()

	time.Sleep(delay)
}

// Head retrieves a peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	return dlp.dl.peerHashes[dlp.id][0], nil
}

// RequestHeadersByHash constructs a GetBlockHeaders request based on a hashed
// origin, associated with a particular peer in the download tester. It is used
// to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	// Find the canonical number of the hash
	dlp.dl.lock.RLock()
	number := uint64(0)
	for num, hash := range dlp.dl.peerHashes[dlp.id] {
		if hash == origin {
			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
			break
		}
	}
	dlp.dl.lock.RUnlock()

	// Use the absolute header fetcher to satisfy the query
	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
}

// RequestHeadersByNumber constructs a GetBlockHeaders request based on a numbered
// origin, associated with a particular peer in the download tester. It is used
// to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	// Gather the next batch of headers
	hashes := dlp.dl.peerHashes[dlp.id]
	headers := dlp.dl.peerHeaders[dlp.id]
	result := make([]*types.Header, 0, amount)
	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
			result = append(result, header)
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	go func() {
		time.Sleep(time.Millisecond)
		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	}()
	return nil
}

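// Index arithmetic sketch: peerHashes is ordered head -> genesis, so block
// number n sits at slice index len(hashes)-n-1. For a 101-entry chain
// (blocks 0..100) with origin=10 and skip=0, the loop reads indices 90, 89,
// 88, ..., i.e. headers for blocks 10, 11, 12, ... in ascending order. Note
// that this mock ignores the reverse flag entirely.
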
// RequestBodies constructs a getBlockBodies request associated with a
// particular peer in the download tester. It is used to retrieve batches of
// block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	blocks := dlp.dl.peerBlocks[dlp.id]

	transactions := make([][]*types.Transaction, 0, len(hashes))
	uncles := make([][]*types.Header, 0, len(hashes))

	for _, hash := range hashes {
		if block, ok := blocks[hash]; ok {
			transactions = append(transactions, block.Transactions())
			uncles = append(uncles, block.Uncles())
		}
	}
	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)

	return nil
}

// RequestReceipts constructs a getReceipts request associated with a
// particular peer in the download tester. It is used to retrieve batches of
// block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	receipts := dlp.dl.peerReceipts[dlp.id]

	results := make([][]*types.Receipt, 0, len(hashes))
	for _, hash := range hashes {
		if receipt, ok := receipts[hash]; ok {
			results = append(results, receipt)
		}
	}
	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)

	return nil
}

// RequestNodeData constructs a getNodeData request associated with a
// particular peer in the download tester. It is used to retrieve batches of
// node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.dl.peerMissingStates[dlp.id][hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)

	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks

	if receipts < 0 {
		receipts = 1
	}
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common - fsMinFullBlocks
	}
	switch tester.downloader.mode {
	case FullSync:
		receipts = 1
	case LightSync:
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
	// Verify the state trie too for fast syncs
	/*if tester.downloader.mode == FastSync {
		pivot := uint64(0)
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}*/
}

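// Count arithmetic sketch: for assertOwnForkedChain(t, tester, c, []int{10, 12}),
// the expected header and block totals are 10 + (12 - c): the first fork in
// full plus only the non-shared tail of the second. The receipt expectation is
// then clamped by sync mode above (e.g. fixed at 1 for full sync, which never
// fetches receipts).
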
// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheItems
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
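	// The hook above records how many results are pending import in "blocked"
	// and then parks on the "proceed" channel, so the loop below can watch the
	// queue fill up to blockCacheItems while imports stay frozen, and release
	// the importer one batch at a time.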
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
					cached = receipts
					//}
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that no block headers, bodies or receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheItems - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Create peers of every type
	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block by itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	targetBlocks := 2*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range receipts {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["attack"], hashes[missing])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding shifted headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

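	// The chain length above (3*fsHeaderSafetyNet + 256 + fsMinFullBlocks) gives
	// the staged attacks below room to plant junk both one and three safety-net
	// depths behind the head, plus slack for the fast-sync pivot. This sizing
	// rationale is a reading of the constants, not documented by the authors.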
  1188  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1189  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1190  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1191  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1192  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1193  
  1194  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1195  		t.Fatalf("succeeded fast attacker synchronisation")
  1196  	}
  1197  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1198  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1199  	}
  1200  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1201  	// This should result in the last fsHeaderSafetyNet headers being rolled
  1202  	// back, and the pivot point being reverted to a non-block status.
  1203  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1204  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1205  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attack peer can't fill in the gap
  1206  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1207  
  1208  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1209  		t.Fatalf("succeeded block attacker synchronisation")
  1210  	}
  1211  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1212  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1213  	}
  1214  	if mode == FastSync {
  1215  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1216  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1217  		}
  1218  	}
  1219  	// Attempt to sync with an attacker that withholds promised blocks after the
  1220  	// fast sync pivot point. This could be an attempt to leave the node with a bad
  1221  	// but already imported pivot block.
  1222  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1223  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1224  
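	// The init hook below runs once the sync (and pivot selection) has started:
	// it deletes every header from the gap upward, withholding the promised
	// blocks, and then unregisters itself so the deletion happens exactly once.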
  1225  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1226  		for i := missing; i <= len(hashes); i++ {
  1227  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1228  		}
  1229  		tester.downloader.syncInitHook = nil
  1230  	}
  1231  
  1232  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1233  		t.Fatalf("succeeded withholding attacker synchronisation")
  1234  	}
  1235  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1236  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1237  	}
  1238  	if mode == FastSync {
  1239  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1240  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1241  		}
  1242  	}
  1243  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1244  	// rollback should also disable fast syncing for this process, verify that we
  1245  	// did a fresh full sync. Note that we can't assert anything about the receipts,
  1246  	// since we don't purge the database of them, hence we can't use assertOwnChain.
  1247  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1248  	if err := tester.sync("valid", nil, mode); err != nil {
  1249  		t.Fatalf("failed to synchronise blocks: %v", err)
  1250  	}
  1251  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1252  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1253  	}
  1254  	if mode != LightSync {
  1255  		if bs := len(tester.ownBlocks); bs != len(blocks) {
  1256  			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1257  		}
  1258  	}
  1259  }
  1260  
  1261  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1262  // afterwards by not sending any useful hashes.
  1263  func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
  1264  func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
  1265  func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
  1266  func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
  1267  func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
  1268  func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
  1269  
  1270  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1271  	t.Parallel()
  1272  
  1273  	tester := newTester()
  1274  	defer tester.terminate()
  1275  
  1276  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1277  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1278  
  1279  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  1280  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1281  	}
  1282  }
  1283  
  1284  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1285  func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
  1286  func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
  1287  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1288  
  1289  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1290  	t.Parallel()
  1291  
  1292  	// Define the disconnection requirement for individual hash fetch errors
  1293  	tests := []struct {
  1294  		result error
  1295  		drop   bool
  1296  	}{
  1297  		{nil, false},                        // Sync succeeded, all is well
  1298  		{errBusy, false},                    // Sync is already in progress, no problem
  1299  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1300  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1301  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1302  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1303  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1304  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1305  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1306  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1307  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1308  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1309  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1310  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1311  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1312  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1313  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1314  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1315  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1316  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1317  	}
  1318  	// Run the tests and check disconnection status
  1319  	tester := newTester()
  1320  	defer tester.terminate()
  1321  
  1322  	for i, tt := range tests {
  1323  		// Register a new peer and ensure its presence
  1324  		id := fmt.Sprintf("test %d", i)
  1325  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1326  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1327  		}
  1328  		if _, ok := tester.peerHashes[id]; !ok {
  1329  			t.Fatalf("test %d: registered peer not found", i)
  1330  		}
  1331  		// Simulate a synchronisation and check the required result
  1332  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1333  
  1334  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1335  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1336  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1337  		}
  1338  	}
  1339  }
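// A minimal standalone sketch of the same drop-check pattern, in case the
// table above ever needs extending; `id` is a peer registered as in the loop
// body, and the error is any entry from the table:
//
//	tester.downloader.synchroniseMock = func(string, common.Hash) error { return errTimeout }
//	tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
//	if _, ok := tester.peerHashes[id]; ok {
//		t.Errorf("timing-out peer %s was not dropped", id)
//	}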
  1340  
  1341  // Tests that synchronisation progress (origin block number, current block number
  1342  // and highest block number) is tracked and updated correctly.
  1343  func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
  1344  func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
  1345  func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
  1346  func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
  1347  func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
  1348  func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
  1349  
  1350  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1351  	t.Parallel()
  1352  
  1353  	tester := newTester()
  1354  	defer tester.terminate()
  1355  
  1356  	// Create a small enough block chain to download
  1357  	targetBlocks := blockCacheItems - 15
  1358  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1359  
  1360  	// Set a sync init hook to catch progress changes
  1361  	starting := make(chan struct{})
  1362  	progress := make(chan struct{})
  1363  
  1364  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1365  		starting <- struct{}{}
  1366  		<-progress
  1367  	}
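	// The hook implements a simple handshake: the syncing goroutine blocks on
	// `starting` right after sync init, the test inspects Progress() at that
	// deterministic point, and a send on `progress` releases the sync again.
	// The same pattern is reused by the forked/failed/faked progress tests below.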
  1368  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1369  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1370  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1371  	}
  1372  	// Synchronise half the blocks and check initial progress
  1373  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1374  	pending := new(sync.WaitGroup)
  1375  	pending.Add(1)
  1376  
  1377  	go func() {
  1378  		defer pending.Done()
  1379  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1380  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1381  		}
  1382  	}()
  1383  	<-starting
  1384  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1385  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1386  	}
  1387  	progress <- struct{}{}
  1388  	pending.Wait()
  1389  
  1390  	// Synchronise all the blocks and check continuation progress
  1391  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1392  	pending.Add(1)
  1393  
  1394  	go func() {
  1395  		defer pending.Done()
  1396  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1397  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1398  		}
  1399  	}()
  1400  	<-starting
  1401  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1402  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1403  	}
  1404  	progress <- struct{}{}
  1405  	pending.Wait()
  1406  
  1407  	// Check final progress after successful sync
  1408  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1409  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1410  	}
  1411  }
  1412  
  1413  // Tests that synchronisation progress (origin block number and highest block
  1414  // number) is tracked and updated correctly in case of a fork (or manual head
  1415  // reversal).
  1416  func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
  1417  func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
  1418  func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
  1419  func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
  1420  func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
  1421  func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
  1422  
  1423  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1424  	t.Parallel()
  1425  
  1426  	tester := newTester()
  1427  	defer tester.terminate()
  1428  
  1429  	// Create a forked chain to simulate origin reversal
  1430  	common, fork := MaxHashFetch, 2*MaxHashFetch
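	// Note that the local `common` (length of the shared chain segment) shadows
	// the common package for the rest of this function; no package identifiers
	// are needed below, so the shadowing is harmless.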
  1431  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1432  
  1433  	// Set a sync init hook to catch progress changes
  1434  	starting := make(chan struct{})
  1435  	progress := make(chan struct{})
  1436  
  1437  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1438  		starting <- struct{}{}
  1439  		<-progress
  1440  	}
  1441  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1442  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1443  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1444  	}
  1445  	// Synchronise with one of the forks and check progress
  1446  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1447  	pending := new(sync.WaitGroup)
  1448  	pending.Add(1)
  1449  
  1450  	go func() {
  1451  		defer pending.Done()
  1452  		if err := tester.sync("fork A", nil, mode); err != nil {
  1453  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1454  		}
  1455  	}()
  1456  	<-starting
  1457  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1458  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1459  	}
  1460  	progress <- struct{}{}
  1461  	pending.Wait()
  1462  
  1463  	// Simulate a successful sync above the fork
  1464  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1465  
  1466  	// Synchronise with the second fork and check progress resets
  1467  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1468  	pending.Add(1)
  1469  
  1470  	go func() {
  1471  		defer pending.Done()
  1472  		if err := tester.sync("fork B", nil, mode); err != nil {
  1473  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1474  		}
  1475  	}()
  1476  	<-starting
  1477  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1478  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1479  	}
  1480  	progress <- struct{}{}
  1481  	pending.Wait()
  1482  
  1483  	// Check final progress after successful sync
  1484  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1485  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1486  	}
  1487  }
  1488  
  1489  // Tests that if synchronisation is aborted due to some failure, then the progress
  1490  // origin is not updated in the next sync cycle, as it should be considered the
  1491  // continuation of the previous sync and not a new instance.
  1492  func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
  1493  func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
  1494  func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
  1495  func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
  1496  func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
  1497  func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
  1498  
  1499  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1500  	t.Parallel()
  1501  
  1502  	tester := newTester()
  1503  	defer tester.terminate()
  1504  
  1505  	// Create a small enough block chain to download
  1506  	targetBlocks := blockCacheItems - 15
  1507  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1508  
  1509  	// Set a sync init hook to catch progress changes
  1510  	starting := make(chan struct{})
  1511  	progress := make(chan struct{})
  1512  
  1513  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1514  		starting <- struct{}{}
  1515  		<-progress
  1516  	}
  1517  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1518  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1519  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1520  	}
  1521  	// Attempt a full sync with a faulty peer
  1522  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1523  	missing := targetBlocks / 2
  1524  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1525  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1526  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1527  
  1528  	pending := new(sync.WaitGroup)
  1529  	pending.Add(1)
  1530  
  1531  	go func() {
  1532  		defer pending.Done()
  1533  		if err := tester.sync("faulty", nil, mode); err == nil {
  1534  			panic("succeeded faulty synchronisation")
  1535  		}
  1536  	}()
  1537  	<-starting
  1538  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1539  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1540  	}
  1541  	progress <- struct{}{}
  1542  	pending.Wait()
  1543  
  1544  	// Synchronise with a good peer and check that the progress origin remains the same after the failure
  1545  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1546  	pending.Add(1)
  1547  
  1548  	go func() {
  1549  		defer pending.Done()
  1550  		if err := tester.sync("valid", nil, mode); err != nil {
  1551  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1552  		}
  1553  	}()
  1554  	<-starting
  1555  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1556  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1557  	}
  1558  	progress <- struct{}{}
  1559  	pending.Wait()
  1560  
  1561  	// Check final progress after successful sync
  1562  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1563  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1564  	}
  1565  }
  1566  
  1567  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1568  // the progress height is successfully reduced at the next sync invocation.
  1569  func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
  1570  func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
  1571  func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
  1572  func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
  1573  func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
  1574  func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
  1575  
  1576  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1577  	t.Parallel()
  1578  
  1579  	tester := newTester()
  1580  	defer tester.terminate()
  1581  
  1582  	// Create a small block chain
  1583  	targetBlocks := blockCacheItems - 15
  1584  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1585  
  1586  	// Set a sync init hook to catch progress changes
  1587  	starting := make(chan struct{})
  1588  	progress := make(chan struct{})
  1589  
  1590  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1591  		starting <- struct{}{}
  1592  		<-progress
  1593  	}
  1594  	// Retrieve the sync progress and ensure all fields are zero (pristine sync)
  1595  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1596  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1597  	}
  1598  	// Create and sync with an attacker that promises a higher chain than available
  1599  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1600  	for i := 1; i < 3; i++ {
  1601  		delete(tester.peerHeaders["attack"], hashes[i])
  1602  		delete(tester.peerBlocks["attack"], hashes[i])
  1603  		delete(tester.peerReceipts["attack"], hashes[i])
  1604  	}
  1605  
  1606  	pending := new(sync.WaitGroup)
  1607  	pending.Add(1)
  1608  
  1609  	go func() {
  1610  		defer pending.Done()
  1611  		if err := tester.sync("attack", nil, mode); err == nil {
  1612  			panic("succeeded attacker synchronisation")
  1613  		}
  1614  	}()
  1615  	<-starting
  1616  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1617  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1618  	}
  1619  	progress <- struct{}{}
  1620  	pending.Wait()
  1621  
  1622  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1623  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1624  	pending.Add(1)
  1625  
  1626  	go func() {
  1627  		defer pending.Done()
  1628  		if err := tester.sync("valid", nil, mode); err != nil {
  1629  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1630  		}
  1631  	}()
  1632  	<-starting
  1633  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1634  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1635  	}
  1636  	progress <- struct{}{}
  1637  	pending.Wait()
  1638  
  1639  	// Check final progress after successful sync
  1640  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1641  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1642  	}
  1643  }
  1644  
  1645  // This test reproduces an issue where unexpected deliveries would
  1646  // block indefinitely if they arrived at the right time.
  1647  // We use data-driven subtests so that this test runs in parallel on its own,
  1648  // not alongside the other tests, avoiding intermittent failures.
  1649  func TestDeliverHeadersHang(t *testing.T) {
  1650  	testCases := []struct {
  1651  		protocol int
  1652  		syncMode SyncMode
  1653  	}{
  1654  		{62, FullSync},
  1655  		{63, FullSync},
  1656  		{63, FastSync},
  1657  		{64, FullSync},
  1658  		{64, FastSync},
  1659  		{64, LightSync},
  1660  	}
  1661  	for _, tc := range testCases {
  1662  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1663  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1664  		})
  1665  	}
  1666  }
  1667  
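// floodingTestPeer wraps a real test peer, but whenever headers are requested
// by number it first floods the downloader with a burst of unsolicited header
// deliveries from fake peers (see RequestHeadersByNumber below), reproducing
// the delivery hang described above.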
  1668  type floodingTestPeer struct {
  1669  	peer   Peer
  1670  	tester *downloadTester
  1671  	pend   sync.WaitGroup
  1672  }
  1673  
  1674  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1675  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1676  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1677  }
  1678  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1679  	return ftp.peer.RequestBodies(hashes)
  1680  }
  1681  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1682  	return ftp.peer.RequestReceipts(hashes)
  1683  }
  1684  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1685  	return ftp.peer.RequestNodeData(hashes)
  1686  }
  1687  
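// RequestHeadersByNumber launches 500 concurrent unrequested deliveries at
// the downloader, forwards the genuine request, and then requires every
// delivery to complete within 60 seconds; a single blocked delivery panics.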
  1688  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1689  	deliveriesDone := make(chan struct{}, 500)
  1690  	for i := 0; i < cap(deliveriesDone); i++ {
  1691  		peer := fmt.Sprintf("fake-peer%d", i)
  1692  		ftp.pend.Add(1)
  1693  
  1694  		go func() {
  1695  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1696  			deliveriesDone <- struct{}{}
  1697  			ftp.pend.Done()
  1698  		}()
  1699  	}
  1700  	// Deliver the actual requested headers.
  1701  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1702  	// None of the extra deliveries should block.
  1703  	timeout := time.After(60 * time.Second)
  1704  	for i := 0; i < cap(deliveriesDone); i++ {
  1705  		select {
  1706  		case <-deliveriesDone:
  1707  		case <-timeout:
  1708  			panic("blocked")
  1709  		}
  1710  	}
  1711  	return nil
  1712  }
  1713  
  1714  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1715  	t.Parallel()
  1716  
  1717  	master := newTester()
  1718  	defer master.terminate()
  1719  
  1720  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1721  	for i := 0; i < 200; i++ {
  1722  		tester := newTester()
  1723  		tester.peerDb = master.peerDb
  1724  
  1725  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1726  		// Whenever the downloader requests headers, flood it with
  1727  		// a lot of unrequested header deliveries.
  1728  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1729  			peer:   tester.downloader.peers.peers["peer"].peer,
  1730  			tester: tester,
  1731  		}
  1732  		if err := tester.sync("peer", nil, mode); err != nil {
  1733  			t.Errorf("test %d: sync failed: %v", i, err)
  1734  		}
  1735  		tester.terminate()
  1736  
  1737  		// Flush all goroutines to prevent messing with subsequent tests
  1738  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1739  	}
  1740  }