github.com/aquanetwork/aquachain@v1.7.8/aqua/downloader/downloader_test.go

// Copyright 2015 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"gitlab.com/aquachain/aquachain/aqua/event"
	"gitlab.com/aquachain/aquachain/aquadb"
	"gitlab.com/aquachain/aquachain/common"
	"gitlab.com/aquachain/aquachain/consensus/aquahash"
	"gitlab.com/aquachain/aquachain/core"
	"gitlab.com/aquachain/aquachain/core/types"
	"gitlab.com/aquachain/aquachain/crypto"
	"gitlab.com/aquachain/aquachain/params"
	"gitlab.com/aquachain/aquachain/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}
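
// The knobs above are package-level settings of the downloader: MaxForkAncestry
// bounds how far below the current head a fork may still root, blockCacheItems
// sizes the in-memory download result cache, and fsHeaderContCheck is the poll
// interval for header continuation checks during fast sync.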

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block    // Genesis block used by the tester and peers
	stateDb aquadb.Database // Database used by the tester for syncing from peers
	peerDb  aquadb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb := aquadb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb = aquadb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)

	return tester
}
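
// Typical tester lifecycle, mirroring the pattern used by the tests below
// (an illustrative sketch, not a test itself):
//
//	tester := newTester()
//	defer tester.terminate()
//	hashes, headers, blocks, receipts := tester.makeChain(10, 0, tester.genesis, nil, false)
//	tester.newPeer("peer", 65, hashes, headers, blocks, receipts)
//	err := tester.sync("peer", nil, FullSync) // nil TD: use the peer's own head TD
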
func (dl *downloadTester) GetBlockVersion(height *big.Int) params.HeaderVersion {
	return params.TestChainConfig.GetBlockVersion(height)
}

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, aquahash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash: block.PrevBlock(i - 1).Hash(),
				Number:     big.NewInt(block.Number().Int64() - 1),
			})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
	}
	return hashes, headerm, blockm, receiptm
}
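
// Ordering note: with n = 3 and parent = genesis, the returned hash slice is
// [block3, block2, block1, genesis]; hashes[0] is always the chain head and
// hashes[len(hashes)-1] the supplied parent.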

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if unbalanced forks were requested
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := false
	if !balanced {
		heavy = true
	}
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}
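
// Fork layout: for n = 5 and f = 2, both returned hash chains share their
// trailing n-f+1 = 4 entries (the common suffix plus the parent) and differ
// only in their first f = 2 entries.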

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, which can block up a peer
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}
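
// The total difficulty bookkeeping above is cumulative: if the parent's stored
// TD is 10 and the incoming header's difficulty is 2, the header is recorded
// with a TD of 12.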

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	err := dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
			dl.peerChainTds[id][genesis] = header.Difficulty
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
			dl.peerChainTds[id][genesis] = block.Difficulty()
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
		}
	}
	return err
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	delay time.Duration
	lock  sync.RWMutex
}

// setDelay is a thread-safe setter for the network delay value.
//func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
//	dlp.lock.Lock()
//	defer dlp.lock.Unlock()
//
//	dlp.delay = delay
//}

// waitDelay is a thread-safe way to sleep for the configured time.
func (dlp *downloadTesterPeer) waitDelay() {
	dlp.lock.RLock()
	delay := dlp.delay
	dlp.lock.RUnlock()

	time.Sleep(delay)
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	return dlp.dl.peerHashes[dlp.id][0], nil
}

// RequestHeadersByHash serves a GetBlockHeaders request over a hashed origin,
// returning batches of headers from the peer associated with the download
// tester.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	// Find the canonical number of the hash
	dlp.dl.lock.RLock()
	number := uint64(0)
	for num, hash := range dlp.dl.peerHashes[dlp.id] {
		if hash == origin {
			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
			break
		}
	}
	dlp.dl.lock.RUnlock()

	// Use the absolute header fetcher to satisfy the query
	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
}

// RequestHeadersByNumber serves a GetBlockHeaders request over a numbered
// origin, returning batches of headers from the peer associated with the
// download tester.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	// Gather the next batch of headers
	hashes := dlp.dl.peerHashes[dlp.id]
	headers := dlp.dl.peerHeaders[dlp.id]
	result := make([]*types.Header, 0, amount)
	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
			result = append(result, header)
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	go func() {
		time.Sleep(time.Millisecond)
		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	}()
	return nil
}
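
// Indexing note for the loop above: peer hashes are stored head-first, so the
// header at chain height origin lives at hashes[len(hashes)-1-origin], and each
// iteration steps another skip+1 heights toward the head.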

// RequestBodies serves a getBlockBodies request, returning batches of block
// bodies from the peer associated with the download tester.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	blocks := dlp.dl.peerBlocks[dlp.id]

	transactions := make([][]*types.Transaction, 0, len(hashes))
	uncles := make([][]*types.Header, 0, len(hashes))

	for _, hash := range hashes {
		if block, ok := blocks[hash]; ok {
			transactions = append(transactions, block.Transactions())
			uncles = append(uncles, block.Uncles())
		}
	}
	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)

	return nil
}

// RequestReceipts serves a getReceipts request, returning batches of block
// receipts from the peer associated with the download tester.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	receipts := dlp.dl.peerReceipts[dlp.id]

	results := make([][]*types.Receipt, 0, len(hashes))
	for _, hash := range hashes {
		if receipt, ok := receipts[hash]; ok {
			results = append(results, receipt)
		}
	}
	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)

	return nil
}

// RequestNodeData serves a getNodeData request, returning batches of state
// trie data from the peer associated with the download tester.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.dl.peerMissingStates[dlp.id][hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)

	return nil
}
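
// Entries flagged in peerMissingStates are looked up but deliberately not
// delivered above, letting tests simulate a peer whose state trie is only
// partially available.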

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks

	if receipts < 0 {
		receipts = 1
	}
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common - fsMinFullBlocks
	}
	if tester.downloader.mode == FullSync {
		receipts = 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
	// Verify the state trie too for fast syncs
	/*if tester.downloader.mode == FastSync {
		pivot := uint64(0)
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}*/
}
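
// Worked example of the counter arithmetic above: for common = 3 and
// lengths = [8, 8], the expected header and block count is 8 + (8 - 3) = 13,
// i.e. one shared prefix plus two distinct fork tails; in FullSync mode the
// receipt count collapses to 1 (just the genesis placeholder).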

// Tests that simple synchronisation against a canonical chain works correctly.
// In this test, common ancestor lookup should be short-circuited and not require
// binary searching.
func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonicalSynchronisation(t, 65, FullSync) }

// func TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonicalSynchronisation(t, 65, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }

// func TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheItems
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
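	// The hook above parks each import batch on the proceed channel, so the
	// loop below can watch the cache fill up to blockCacheItems before
	// releasing the next batch.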
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
					cached = receipts
					//}
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronisation against a forked chain works correctly. In
// this test, common ancestor lookup should *not* be short-circuited, and a full
// binary search should be executed.
func TestForkedSync65Full(t *testing.T) { testForkedSync(t, 65, FullSync) }
func TestForkedSync65Fast(t *testing.T) { testForkedSync(t, 65, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync65Full(t *testing.T) { testHeavyForkedSync(t, 65, FullSync) }

func TestHeavyForkedSync65Fast(t *testing.T) { testHeavyForkedSync(t, 65, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync65Full(t *testing.T) { testBoundedForkedSync(t, 65, FullSync) }
func TestBoundedForkedSync65Fast(t *testing.T) { testBoundedForkedSync(t, 65, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync65Full(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FullSync) }
func TestBoundedHeavyForkedSync65Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader64(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader65(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel65Fast(t *testing.T) { testCancel(t, 65, FastSync) }
func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheItems - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation65Full(t *testing.T) { testMultiSynchronisation(t, 65, FullSync) }
func TestMultiSynchronisation65Fast(t *testing.T) { testMultiSynchronisation(t, 65, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation65Full(t *testing.T) { testMultiProtoSync(t, 65, FullSync) }

// func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, 65, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Create peers of every type
	tester.newPeer("peer 65", 65, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Check that no peers have been dropped off
	for _, version := range []int{65, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit65Full(t *testing.T) { testEmptyShortCircuit(t, 65, FullSync) }
func TestEmptyShortCircuit65Fast(t *testing.T) { testEmptyShortCircuit(t, 65, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	targetBlocks := 2*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range receipts {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack65Full(t *testing.T) { testMissingHeaderAttack(t, 65, FullSync) }

func TestMissingHeaderAttack65Fast(t *testing.T) { testMissingHeaderAttack(t, 65, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["attack"], hashes[missing])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack65Full(t *testing.T) { testShiftedHeaderAttack(t, 65, FullSync) }
func TestShiftedHeaderAttack65Fast(t *testing.T) { testShiftedHeaderAttack(t, 65, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding shifted headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}
  1165  
  1166  // Tests that upon detecting an invalid header, the recent ones are rolled back
  1167  // for various failure scenarios. Afterwards a full sync is attempted to make
  1168  // sure no state was corrupted.
  1169  func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) }
  1170  func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
  1171  
  1172  func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
  1173  	t.Parallel()
  1174  
  1175  	tester := newTester()
  1176  	defer tester.terminate()
  1177  
  1178  	// Create a small enough block chain to download
  1179  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
  1180  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1181  
  1182  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
  1183  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
  1184  	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
  1185  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
  1186  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])
  1187  
  1188  	if err := tester.sync("fast-attack", nil, mode); err == nil {
  1189  		t.Fatalf("succeeded fast attacker synchronisation")
  1190  	}
  1191  	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
  1192  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
  1193  	}
  1194  	// Attempt to sync with an attacker that feeds junk during the block import phase.
  1195  	// This should result in both the last fsHeaderSafetyNet number of headers being
  1196  	// rolled back, and also the pivot point being reverted to a non-block status.
  1197  	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
  1198  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1199  	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
  1200  	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
  1201  
  1202  	if err := tester.sync("block-attack", nil, mode); err == nil {
  1203  		t.Fatalf("succeeded block attacker synchronisation")
  1204  	}
  1205  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1206  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1207  	}
  1208  	if mode == FastSync {
  1209  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1210  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1211  		}
  1212  	}
  1213  	// Attempt to sync with an attacker that withholds promised blocks after the
  1214  	// fast sync pivot point. This could be a trial to leave the node with a bad
  1215  	// but already imported pivot block.
  1216  	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
  1217  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
  1218  
  1219  	tester.downloader.syncInitHook = func(uint64, uint64) {
  1220  		for i := missing; i <= len(hashes); i++ {
  1221  			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
  1222  		}
  1223  		tester.downloader.syncInitHook = nil
  1224  	}
  1225  
  1226  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
  1227  		t.Fatalf("succeeded withholding attacker synchronisation")
  1228  	}
  1229  	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
  1230  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
  1231  	}
  1232  	if mode == FastSync {
  1233  		if head := tester.CurrentBlock().NumberU64(); head != 0 {
  1234  			t.Errorf("fast sync pivot block #%d not rolled back", head)
  1235  		}
  1236  	}
  1237  	// Synchronise with the valid peer and make sure sync succeeds. Since the last
  1238  	// rollback should also disable fast syncing for this process, verify that we
  1239  	// did a fresh full sync. Note, we can't assert anything about the receipts
  1240  	// since we won't purge the database of them, hence we can't use assertOwnChain.
  1241  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1242  	if err := tester.sync("valid", nil, mode); err != nil {
  1243  		t.Fatalf("failed to synchronise blocks: %v", err)
  1244  	}
  1245  	if hs := len(tester.ownHeaders); hs != len(headers) {
  1246  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
  1247  	}
  1248  	if bs := len(tester.ownBlocks); bs != len(blocks) {
  1249  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
  1250  	}
  1251  }
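
// After any of the failed fast syncs above, the pivot must not survive as an
// imported block. A small illustrative predicate distilled from the checks in
// the test; it is not part of the original suite:
func pivotRolledBack(tester *downloadTester) bool {
	// A fully unwound fast sync leaves only the genesis block imported.
	return tester.CurrentBlock().NumberU64() == 0
}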
  1252  
  1253  // Tests that a peer advertising a high TD doesn't get to stall the downloader
  1254  // afterwards by not sending any useful hashes.
  1255  func TestHighTDStarvationAttack65Full(t *testing.T) { testHighTDStarvationAttack(t, 65, FullSync) }
  1256  
  1257  func TestHighTDStarvationAttack65Fast(t *testing.T) { testHighTDStarvationAttack(t, 65, FastSync) }
  1258  func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
  1259  func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }
  1260  
  1261  func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
  1262  	t.Parallel()
  1263  
  1264  	tester := newTester()
  1265  	defer tester.terminate()
  1266  
  1267  	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
  1268  	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
  1269  
  1270  	if err := tester.sync("attack", big.NewInt(100000000), mode); err != errStallingPeer {
  1271  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  1272  	}
  1273  }
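
// Note that the stall needs no elaborate setup: the attacker advertises an
// arbitrarily inflated TD (the big.NewInt(100000000) above) while owning
// nothing beyond the genesis hash, so every fetch after the handshake comes
// back useless and the sync aborts with errStallingPeer.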
  1274  
  1275  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  1276  func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }
  1277  func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
  1278  
  1279  func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
  1280  	t.Parallel()
  1281  
  1282  	// Define the disconnection requirement for individual hash fetch errors
  1283  	tests := []struct {
  1284  		result error
  1285  		drop   bool
  1286  	}{
  1287  		{nil, false},                        // Sync succeeded, all is well
  1288  		{errBusy, false},                    // Sync is already in progress, no problem
  1289  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1290  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1291  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1292  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1293  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1294  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1295  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1296  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1297  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1298  		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
  1299  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1300  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1301  		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
  1302  		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
  1303  		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
  1304  		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
  1305  		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
  1306  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1307  	}
  1308  	// Run the tests and check disconnection status
  1309  	tester := newTester()
  1310  	defer tester.terminate()
  1311  
  1312  	for i, tt := range tests {
  1313  		// Register a new peer and ensure its presence
  1314  		id := fmt.Sprintf("test %d", i)
  1315  		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
  1316  			t.Fatalf("test %d: failed to register new peer: %v", i, err)
  1317  		}
  1318  		if _, ok := tester.peerHashes[id]; !ok {
  1319  			t.Fatalf("test %d: registered peer not found", i)
  1320  		}
  1321  		// Simulate a synchronisation and check the required result
  1322  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1323  
  1324  		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  1325  		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
  1326  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1327  		}
  1328  	}
  1329  }
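
// The drop decisions above are exercised without real networking:
// synchroniseMock replaces the downloader's internal sync entry point, so
// Synchronise returns the scripted error and the peer-dropping logic reacts
// exactly as it would after a live sync. A minimal sketch of the pattern,
// reusing the identifiers from the loop above:
//
//	tester.downloader.synchroniseMock = func(string, common.Hash) error { return errStallingPeer }
//	tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
//	_, ok := tester.peerHashes[id] // expect ok == false for droppable errors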
  1330  
  1331  // Tests that synchronisation progress (origin block number, current block number
  1332  // and highest block number) is tracked and updated correctly.
  1333  func TestSyncProgress65Full(t *testing.T) { testSyncProgress(t, 65, FullSync) }
  1334  func TestSyncProgress65Fast(t *testing.T) { testSyncProgress(t, 65, FastSync) }
  1335  func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
  1336  func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }
  1337  
  1338  func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1339  	t.Parallel()
  1340  
  1341  	tester := newTester()
  1342  	defer tester.terminate()
  1343  
  1344  	// Create a small enough block chain to download
  1345  	targetBlocks := blockCacheItems - 15
  1346  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1347  
  1348  	// Set a sync init hook to catch progress changes
  1349  	starting := make(chan struct{})
  1350  	progress := make(chan struct{})
  1351  
  1352  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1353  		starting <- struct{}{}
  1354  		<-progress
  1355  	}
  1356  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1357  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1358  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1359  	}
  1360  	// Synchronise half the blocks and check initial progress
  1361  	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
  1362  	pending := new(sync.WaitGroup)
  1363  	pending.Add(1)
  1364  
  1365  	go func() {
  1366  		defer pending.Done()
  1367  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1368  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1369  		}
  1370  	}()
  1371  	<-starting
  1372  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
  1373  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
  1374  	}
  1375  	progress <- struct{}{}
  1376  	pending.Wait()
  1377  
  1378  	// Synchronise all the blocks and check continuation progress
  1379  	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
  1380  	pending.Add(1)
  1381  
  1382  	go func() {
  1383  		defer pending.Done()
  1384  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1385  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1386  		}
  1387  	}()
  1388  	<-starting
  1389  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
  1390  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
  1391  	}
  1392  	progress <- struct{}{}
  1393  	pending.Wait()
  1394  
  1395  	// Check final progress after successful sync
  1396  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1397  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
  1398  	}
  1399  }
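
// A Progress snapshot reduces to a single completion check; a small
// illustrative helper distilled from the assertions above (the tests compare
// the raw fields directly, and this helper is not part of the original suite):
func syncComplete(d *Downloader) bool {
	p := d.Progress()
	return p.HighestBlock > 0 && p.CurrentBlock >= p.HighestBlock
}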
  1400  
  1401  // Tests that synchronisation progress (origin block number and highest block
  1402  // number) is tracked and updated correctly in case of a fork (or manual head
  1403  // reversal).
  1404  func TestForkedSyncProgress65Full(t *testing.T) { testForkedSyncProgress(t, 65, FullSync) }
  1405  func TestForkedSyncProgress65Fast(t *testing.T) { testForkedSyncProgress(t, 65, FastSync) }
  1406  func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
  1407  func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) }
  1408  
  1409  func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1410  	t.Parallel()
  1411  
  1412  	tester := newTester()
  1413  	defer tester.terminate()
  1414  
  1415  	// Create a forked chain to simulate origin reversal
  1416  	common, fork := MaxHashFetch, 2*MaxHashFetch
  1417  	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
  1418  
  1419  	// Set a sync init hook to catch progress changes
  1420  	starting := make(chan struct{})
  1421  	progress := make(chan struct{})
  1422  
  1423  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1424  		starting <- struct{}{}
  1425  		<-progress
  1426  	}
  1427  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1428  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1429  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1430  	}
  1431  	// Synchronise with one of the forks and check progress
  1432  	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
  1433  	pending := new(sync.WaitGroup)
  1434  	pending.Add(1)
  1435  
  1436  	go func() {
  1437  		defer pending.Done()
  1438  		if err := tester.sync("fork A", nil, mode); err != nil {
  1439  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1440  		}
  1441  	}()
  1442  	<-starting
  1443  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
  1444  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
  1445  	}
  1446  	progress <- struct{}{}
  1447  	pending.Wait()
  1448  
  1449  	// Simulate a successful sync above the fork
  1450  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1451  
  1452  	// Synchronise with the second fork and check progress resets
  1453  	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
  1454  	pending.Add(1)
  1455  
  1456  	go func() {
  1457  		defer pending.Done()
  1458  		if err := tester.sync("fork B", nil, mode); err != nil {
  1459  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1460  		}
  1461  	}()
  1462  	<-starting
  1463  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1464  		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
  1465  	}
  1466  	progress <- struct{}{}
  1467  	pending.Wait()
  1468  
  1469  	// Check final progress after successful sync
  1470  	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
  1471  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
  1472  	}
  1473  }
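
// When the sync origin moves backwards (fork or manual head reversal), the
// reported StartingBlock is expected to shrink accordingly: the checks above
// pin it to the common ancestor rather than to the previously synced height.
// An illustrative expectation, using the in-test identifiers:
//
//	p := tester.downloader.Progress()
//	// after switching to fork B: p.StartingBlock == uint64(common)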
  1474  
  1475  // Tests that if synchronisation is aborted due to some failure, then the progress
  1476  // origin is not updated in the next sync cycle, as it should be considered the
  1477  // continuation of the previous sync and not a new instance.
  1478  func TestFailedSyncProgress65Full(t *testing.T) { testFailedSyncProgress(t, 65, FullSync) }
  1479  func TestFailedSyncProgress65Fast(t *testing.T) { testFailedSyncProgress(t, 65, FastSync) }
  1480  func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
  1481  func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }
  1482  
  1483  func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1484  	t.Parallel()
  1485  
  1486  	tester := newTester()
  1487  	defer tester.terminate()
  1488  
  1489  	// Create a small enough block chain to download
  1490  	targetBlocks := blockCacheItems - 15
  1491  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
  1492  
  1493  	// Set a sync init hook to catch progress changes
  1494  	starting := make(chan struct{})
  1495  	progress := make(chan struct{})
  1496  
  1497  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1498  		starting <- struct{}{}
  1499  		<-progress
  1500  	}
  1501  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1502  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1503  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1504  	}
  1505  	// Attempt a full sync with a faulty peer
  1506  	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
  1507  	missing := targetBlocks / 2
  1508  	delete(tester.peerHeaders["faulty"], hashes[missing])
  1509  	delete(tester.peerBlocks["faulty"], hashes[missing])
  1510  	delete(tester.peerReceipts["faulty"], hashes[missing])
  1511  
  1512  	pending := new(sync.WaitGroup)
  1513  	pending.Add(1)
  1514  
  1515  	go func() {
  1516  		defer pending.Done()
  1517  		if err := tester.sync("faulty", nil, mode); err == nil {
  1518  			panic("succeeded faulty synchronisation")
  1519  		}
  1520  	}()
  1521  	<-starting
  1522  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
  1523  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
  1524  	}
  1525  	progress <- struct{}{}
  1526  	pending.Wait()
  1527  
  1528  	// Synchronise with a good peer and check that the progress origin remains the same after the failure
  1529  	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
  1530  	pending.Add(1)
  1531  
  1532  	go func() {
  1533  		defer pending.Done()
  1534  		if err := tester.sync("valid", nil, mode); err != nil {
  1535  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1536  		}
  1537  	}()
  1538  	<-starting
  1539  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
  1540  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
  1541  	}
  1542  	progress <- struct{}{}
  1543  	pending.Wait()
  1544  
  1545  	// Check final progress after successful sync
  1546  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1547  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
  1548  	}
  1549  }
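
// Retaining the origin across a failed attempt keeps externally computed
// completion ratios monotonic across retries. A hedged sketch of such a ratio
// (purely illustrative, not part of the downloader API):
//
//	p := tester.downloader.Progress()
//	if p.HighestBlock > p.StartingBlock {
//		frac := float64(p.CurrentBlock-p.StartingBlock) / float64(p.HighestBlock-p.StartingBlock)
//		_ = frac // 0.0 at the retained origin, 1.0 once fully synced
//	}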
  1550  
  1551  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1552  // the progress height is successfully reduced at the next sync invocation.
  1553  func TestFakedSyncProgress65Full(t *testing.T) { testFakedSyncProgress(t, 65, FullSync) }
  1554  func TestFakedSyncProgress65Fast(t *testing.T) { testFakedSyncProgress(t, 65, FastSync) }
  1555  func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
  1556  func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) }
  1557  
  1558  func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
  1559  	t.Parallel()
  1560  
  1561  	tester := newTester()
  1562  	defer tester.terminate()
  1563  
  1564  	// Create a small block chain
  1565  	targetBlocks := blockCacheItems - 15
  1566  	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
  1567  
  1568  	// Set a sync init hook to catch progress changes
  1569  	starting := make(chan struct{})
  1570  	progress := make(chan struct{})
  1571  
  1572  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1573  		starting <- struct{}{}
  1574  		<-progress
  1575  	}
  1576  	// Retrieve the sync progress and ensure the values are zero (pristine sync)
  1577  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
  1578  		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
  1579  	}
  1580  	// Create and sync with an attacker that promises a higher chain than available
  1581  	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
  1582  	for i := 1; i < 3; i++ {
  1583  		delete(tester.peerHeaders["attack"], hashes[i])
  1584  		delete(tester.peerBlocks["attack"], hashes[i])
  1585  		delete(tester.peerReceipts["attack"], hashes[i])
  1586  	}
  1587  
  1588  	pending := new(sync.WaitGroup)
  1589  	pending.Add(1)
  1590  
  1591  	go func() {
  1592  		defer pending.Done()
  1593  		if err := tester.sync("attack", nil, mode); err == nil {
  1594  			panic("succeeded attacker synchronisation")
  1595  		}
  1596  	}()
  1597  	<-starting
  1598  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
  1599  		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
  1600  	}
  1601  	progress <- struct{}{}
  1602  	pending.Wait()
  1603  
  1604  	// Synchronise with a good peer and check that the progress height has been reduced to the true value
  1605  	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
  1606  	pending.Add(1)
  1607  
  1608  	go func() {
  1609  		defer pending.Done()
  1610  		if err := tester.sync("valid", nil, mode); err != nil {
  1611  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1612  		}
  1613  	}()
  1614  	<-starting
  1615  	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1616  		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
  1617  	}
  1618  	progress <- struct{}{}
  1619  	pending.Wait()
  1620  
  1621  	// Check final progress after successful sync
  1622  	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
  1623  		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
  1624  	}
  1625  }
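
// HighestBlock mirrors the downloader's internal syncStatsChainHeight (the
// fork test above overwrites that field directly), so a faked height persists
// only until the next sync re-anchors it against an honest head, which is
// exactly what the final checks assert. An illustrative expectation:
//
//	p := tester.downloader.Progress()
//	// after resyncing with "valid": p.HighestBlock == uint64(targetBlocks)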
  1626  
  1627  // This test reproduces an issue where unexpected deliveries would
  1628  // block indefinitely if they arrived at the right time.
  1629  // Data-driven subtests are used so that the cases run in parallel with each
  1630  // other but not with the rest of the suite, avoiding intermittent failures.
  1631  func TestDeliverHeadersHang(t *testing.T) {
  1632  	testCases := []struct {
  1633  		protocol int
  1634  		syncMode SyncMode
  1635  	}{
  1636  		//{65, FullSync},
  1637  		//{65, FastSync},
  1638  		{64, FullSync},
  1639  		{64, FastSync},
  1640  	}
  1641  	for _, tc := range testCases {
  1642  		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
  1643  			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
  1644  		})
  1645  	}
  1646  }
  1647  
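// floodingTestPeer wraps a plain test peer, delegating every request to the
// wrapped peer except RequestHeadersByNumber, which additionally floods the
// downloader with a burst of unsolicited header deliveries from fake peers.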
  1648  type floodingTestPeer struct {
  1649  	peer   Peer
  1650  	tester *downloadTester
  1651  	pend   sync.WaitGroup
  1652  }
  1653  
  1654  func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
  1655  func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
  1656  	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
  1657  }
  1658  func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
  1659  	return ftp.peer.RequestBodies(hashes)
  1660  }
  1661  func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
  1662  	return ftp.peer.RequestReceipts(hashes)
  1663  }
  1664  func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
  1665  	return ftp.peer.RequestNodeData(hashes)
  1666  }
  1667  
  1668  func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
  1669  	deliveriesDone := make(chan struct{}, 500)
  1670  	for i := 0; i < cap(deliveriesDone); i++ {
  1671  		peer := fmt.Sprintf("fake-peer%d", i)
  1672  		ftp.pend.Add(1)
  1673  
  1674  		go func() {
  1675  			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
  1676  			deliveriesDone <- struct{}{}
  1677  			ftp.pend.Done()
  1678  		}()
  1679  	}
  1680  	// Deliver the actual requested headers.
  1681  	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
  1682  	// None of the extra deliveries should block.
  1683  	timeout := time.After(60 * time.Second)
  1684  	for i := 0; i < cap(deliveriesDone); i++ {
  1685  		select {
  1686  		case <-deliveriesDone:
  1687  		case <-timeout:
  1688  			panic("blocked")
  1689  		}
  1690  	}
  1691  	return nil
  1692  }
  1693  
  1694  func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
  1695  	t.Parallel()
  1696  
  1697  	master := newTester()
  1698  	defer master.terminate()
  1699  
  1700  	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
  1701  	for i := 0; i < 200; i++ {
  1702  		tester := newTester()
  1703  		tester.peerDb = master.peerDb
  1704  
  1705  		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
  1706  		// Whenever the downloader requests headers, flood it with
  1707  		// a lot of unrequested header deliveries.
  1708  		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
  1709  			peer:   tester.downloader.peers.peers["peer"].peer,
  1710  			tester: tester,
  1711  		}
  1712  		if err := tester.sync("peer", nil, mode); err != nil {
  1713  			t.Errorf("test %d: sync failed: %v", i, err)
  1714  		}
  1715  		tester.terminate()
  1716  
  1717  		// Flush all goroutines to prevent messing with subsequent tests
  1718  		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
  1719  	}
  1720  }
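
// The hang reproduced above had unsolicited deliveries blocking forever on an
// internal channel. With no synchronisation in flight, the downloader is
// instead expected to refuse a delivery promptly; a minimal sketch of that
// contract (illustrative only, assuming an idle Downloader):
func exampleIdleDelivery(d *Downloader) error {
	// Should return quickly with an error rather than block the caller.
	return d.DeliverHeaders("nobody", []*types.Header{{}})
}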