github.com/LampardNguyen234/go-ethereum@v1.10.16-0.20220117140830-b6a3b0260724/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"io/ioutil"
	"math/big"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/LampardNguyen234/go-ethereum"
	"github.com/LampardNguyen234/go-ethereum/common"
	"github.com/LampardNguyen234/go-ethereum/consensus/ethash"
	"github.com/LampardNguyen234/go-ethereum/core"
	"github.com/LampardNguyen234/go-ethereum/core/rawdb"
	"github.com/LampardNguyen234/go-ethereum/core/types"
	"github.com/LampardNguyen234/go-ethereum/core/vm"
	"github.com/LampardNguyen234/go-ethereum/eth/protocols/eth"
	"github.com/LampardNguyen234/go-ethereum/eth/protocols/snap"
	"github.com/LampardNguyen234/go-ethereum/event"
	"github.com/LampardNguyen234/go-ethereum/log"
	"github.com/LampardNguyen234/go-ethereum/params"
	"github.com/LampardNguyen234/go-ethereum/rlp"
	"github.com/LampardNguyen234/go-ethereum/trie"
)

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	freezer    string           // Temp directory backing the database freezer (ancient store)
	chain      *core.BlockChain // Local block chain being synchronised into
	downloader *Downloader      // Downloader instance under test

	peers map[string]*downloadTesterPeer // Registered remote peers, keyed by id
	lock  sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	freezer, err := ioutil.TempDir("", "")
	if err != nil {
		panic(err)
	}
	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
	if err != nil {
		panic(err)
	}
	core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000))

	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		panic(err)
	}
	tester := &downloadTester{
		freezer: freezer,
		chain:   chain,
		peers:   make(map[string]*downloadTesterPeer),
	}
	tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer)
	return tester
}
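
// A minimal usage sketch (mirroring testCanonSync below): build a tester,
// register a peer that serves a pre-generated chain, then run a sync cycle
// against it:
//
//	tester := newTester()
//	defer tester.terminate()
//
//	chain := testChainBase.shorten(blockCacheMaxItems - 15)
//	tester.newPeer("peer", eth.ETH66, chain.blocks[1:])
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}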

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
	dl.chain.Stop()

	os.RemoveAll(dl.freezer)
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	head := dl.peers[id].chain.CurrentBlock()
	if td == nil {
		// If no particular TD was requested, load from the peer's blockchain
		td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64())
	}
	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, head.Hash(), td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{
		dl:              dl,
		id:              id,
		chain:           newTestBlockchain(blocks),
		withholdHeaders: make(map[common.Hash]struct{}),
	}
	dl.peers[id] = peer

	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
		panic(err)
	}
	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
		panic(err)
	}
	return peer
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.SnapSyncer.Unregister(id)
	dl.downloader.UnregisterPeer(id)
}

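// downloadTesterPeer is a mock remote peer backed by its own block chain
// instance. Requests are serviced by the live eth/snap handler code against
// that chain, so responses match what a real peer would have sent. Headers
// listed in withholdHeaders are censored from responses to simulate a
// malicious peer.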
type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	chain *core.BlockChain

	withholdHeaders map[common.Hash]struct{}
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	head := dlp.chain.CurrentBlock()
	return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64())
}

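// unmarshalRlpHeaders decodes a batch of RLP encoded headers, panicking on
// malformed data since test chains are expected to be well formed.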
func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
	var headers = make([]*types.Header, len(rlpdata))
	for i, data := range rlpdata {
		var h types.Header
		if err := rlp.DecodeBytes(data, &h); err != nil {
			panic(err)
		}
		headers[i] = &h
	}
	return headers
}

// RequestHeadersByHash services a GetBlockHeaders request over a hash-based
// origin on behalf of a particular peer in the download tester. The generated
// response is delivered asynchronously on the provided sink channel.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Hash: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestHeadersByNumber services a GetBlockHeaders request over a number-based
// origin on behalf of a particular peer in the download tester. The generated
// response is delivered asynchronously on the provided sink channel.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Number: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestBodies services a GetBlockBodies request on behalf of a particular
// peer in the download tester. The generated response is delivered
// asynchronously on the provided sink channel.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)

	bodies := make([]*eth.BlockBody, len(blobs))
	for i, blob := range blobs {
		bodies[i] = new(eth.BlockBody)
		rlp.DecodeBytes(blob, bodies[i])
	}
	var (
		txsHashes   = make([]common.Hash, len(bodies))
		uncleHashes = make([]common.Hash, len(bodies))
	)
	hasher := trie.NewStackTrie(nil)
	for i, body := range bodies {
		txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
		uncleHashes[i] = types.CalcUncleHash(body.Uncles)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockBodiesPacket)(&bodies),
		Meta: [][]common.Hash{txsHashes, uncleHashes},
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestReceipts services a GetReceipts request on behalf of a particular
// peer in the download tester. The generated response is delivered
// asynchronously on the provided sink channel.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)

	receipts := make([][]*types.Receipt, len(blobs))
	for i, blob := range blobs {
		rlp.DecodeBytes(blob, &receipts[i])
	}
	hasher := trie.NewStackTrie(nil)
	hashes = make([]common.Hash, len(receipts))
	for i, receipt := range receipts {
		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.ReceiptsPacket)(&receipts),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// ID retrieves the peer's unique identifier.
func (dlp *downloadTesterPeer) ID() string {
	return dlp.id
}

// RequestAccountRange fetches a batch of accounts rooted in a specific account
// trie, starting with the origin.
func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetAccountRangePacket{
		ID:     id,
		Root:   root,
		Origin: origin,
		Limit:  limit,
		Bytes:  bytes,
	}
	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)

	// We need to convert to non-slim format, delegate to the packet code
	res := &snap.AccountRangePacket{
		ID:       id,
		Accounts: slimaccs,
		Proof:    proofs,
	}
	hashes, accounts, _ := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
	return nil
}

// RequestStorageRanges fetches a batch of storage slots belonging to one or
// more accounts. If slots from only one account are requested, an origin marker
// may also be used to retrieve from there.
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetStorageRangesPacket{
		ID:       id,
		Accounts: accounts,
		Root:     root,
		Origin:   origin,
		Limit:    limit,
		Bytes:    bytes,
	}
	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)

	// We need to demultiplex the results, delegate to the packet code
	res := &snap.StorageRangesPacket{
		ID:    id,
		Slots: storage,
		Proof: proofs,
	}
	hashes, slots := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
	return nil
}

// RequestByteCodes fetches a batch of bytecodes by hash.
func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	req := &snap.GetByteCodesPacket{
		ID:     id,
		Hashes: hashes,
		Bytes:  bytes,
	}
	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
	return nil
}

// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
// a specific state trie.
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
	req := &snap.GetTrieNodesPacket{
		ID:    id,
		Root:  root,
		Paths: paths,
		Bytes: bytes,
	}
	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
	return nil
}

// Log retrieves the peer's own contextual logger.
func (dlp *downloadTesterPeer) Log() log.Logger {
	return log.New("peer", dlp.id)
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	headers, blocks, receipts := length, length, length
	if tester.downloader.getMode() == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Snap(t *testing.T)  { testCanonSync(t, eth.ETH66, SnapSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := len(testChainBase.blocks) - 1
	tester.newPeer("peer", protocol, testChainBase.blocks[1:])

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Snap(t *testing.T)  { testForkedSync(t, eth.ETH66, SnapSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	tester.newPeer("fork B", protocol, chainB.blocks[1:])
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainB.blocks))
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
	chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
	tester.newPeer("light", protocol, chainA.blocks[1:])
	tester.newPeer("heavy", protocol, chainB.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainB.blocks))
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA.blocks[1:])
	tester.newPeer("rewriter", protocol, chainB.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}

func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Snap(t *testing.T)  { testCancel(t, eth.ETH66, SnapSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(len(chain.blocks)/(i+1)).blocks[1:])
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, SnapSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type
	tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
	//tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Check that no peers have been dropped off
	for _, version := range []int{66} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block by itself.
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blocks[1:] {
		if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, block := range chain.blocks[1:] {
		if mode == SnapSync && len(block.Transactions()) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1

	fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

	blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
	blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be an attempt to leave the node with a bad
	// but already imported pivot block.
	withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])

	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < len(chain.blocks); i++ {
			withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Snap(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
}
func TestHighTDStarvationAttack66Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}

func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain.blocks[1:])
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		tester.newPeer(id, protocol, chain.blocks[1:])
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.chain.Genesis().Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Snap(t *testing.T)  { testSyncProgress(t, eth.ETH66, SnapSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
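	// The hook above parks the downloader at the start of each sync cycle so
	// the test can assert the reported progress, then releases it via the
	// progress channel. The same handshake is reused by the other progress
	// tests below.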
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks)/2).blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks)/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks)/2 - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks) - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}

// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversion).
func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chainA.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainA.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainB.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	missing := len(chain.blocks)/2 - 1

	faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
	faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the
	// same after a failure
	tester.newPeer("valid", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(chain.blocks) - 1),
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	numMissing := 5
	for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
		attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
	}
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(len(chain.blocks) - numMissing)
	tester.newPeer("valid", protocol, validChain.blocks[1:])
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(validChain.blocks) - 1),
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to ensure it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
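	// reqs reconstructs the header numbers a request with the given origin,
	// count and span would fetch: from, from+span+1, from+2*(span+1), ...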
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, SnapSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH66, LightSync)
}

func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
	// Create a new tester with a particular hard coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain.blocks[1:])

	var expect error
	if mode == SnapSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == SnapSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, len(chain.blocks))
	}
}