github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/eth/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"os"
    24  	"strings"
    25  	"sync"
    26  	"sync/atomic"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum"
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/consensus/ethash"
    33  	"github.com/ethereum/go-ethereum/core"
    34  	"github.com/ethereum/go-ethereum/core/rawdb"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/core/vm"
    37  	"github.com/ethereum/go-ethereum/eth/protocols/eth"
    38  	"github.com/ethereum/go-ethereum/eth/protocols/snap"
    39  	"github.com/ethereum/go-ethereum/event"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/params"
    42  	"github.com/ethereum/go-ethereum/rlp"
    43  	"github.com/ethereum/go-ethereum/trie"
    44  )
    45  
     46  // downloadTester is a test simulator for mocking out the local blockchain.
    47  type downloadTester struct {
    48  	freezer    string
    49  	chain      *core.BlockChain
    50  	downloader *Downloader
    51  
    52  	peers map[string]*downloadTesterPeer
    53  	lock  sync.RWMutex
    54  }
    55  
    56  // newTester creates a new downloader test mocker.
    57  func newTester(t *testing.T) *downloadTester {
    58  	return newTesterWithNotification(t, nil)
    59  }
    60  
     61  // newTesterWithNotification creates a new downloader test mocker with a callback
    62  func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
    63  	freezer := t.TempDir()
    64  	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
    65  	if err != nil {
    66  		panic(err)
    67  	}
    68  	t.Cleanup(func() {
    69  		db.Close()
    70  	})
    71  	core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000))
    72  
    73  	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
    74  	if err != nil {
    75  		panic(err)
    76  	}
    77  	tester := &downloadTester{
    78  		freezer: freezer,
    79  		chain:   chain,
    80  		peers:   make(map[string]*downloadTesterPeer),
    81  	}
    82  	tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success)
    83  	return tester
    84  }
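
         // A typical test drives the tester in three steps (a minimal sketch of the
         // pattern used throughout this file, e.g. testCanonSync below):
         //
         //	tester := newTester(t)
         //	defer tester.terminate()
         //	tester.newPeer("peer", eth.ETH66, chain.blocks[1:])
         //	err := tester.sync("peer", nil, FullSync)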
    85  
    86  // terminate aborts any operations on the embedded downloader and releases all
    87  // held resources.
    88  func (dl *downloadTester) terminate() {
    89  	dl.downloader.Terminate()
    90  	dl.chain.Stop()
    91  
    92  	os.RemoveAll(dl.freezer)
    93  }
    94  
    95  // sync starts synchronizing with a remote peer, blocking until it completes.
    96  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
    97  	head := dl.peers[id].chain.CurrentBlock()
    98  	if td == nil {
    99  		// If no particular TD was requested, load from the peer's blockchain
   100  		td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64())
   101  	}
   102  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   103  	err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
   104  	select {
   105  	case <-dl.downloader.cancelCh:
   106  		// Ok, downloader fully cancelled after sync cycle
   107  	default:
   108  		// Downloader is still accepting packets, can block a peer up
   109  		panic("downloader active post sync cycle") // panic will be caught by tester
   110  	}
   111  	return err
   112  }
   113  
   114  // newPeer registers a new block download source into the downloader.
   115  func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
   116  	dl.lock.Lock()
   117  	defer dl.lock.Unlock()
   118  
   119  	peer := &downloadTesterPeer{
   120  		dl:              dl,
   121  		id:              id,
   122  		chain:           newTestBlockchain(blocks),
   123  		withholdHeaders: make(map[common.Hash]struct{}),
   124  	}
   125  	dl.peers[id] = peer
   126  
   127  	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
   128  		panic(err)
   129  	}
   130  	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
   131  		panic(err)
   132  	}
   133  	return peer
   134  }
   135  
   136  // dropPeer simulates a hard peer removal from the connection pool.
   137  func (dl *downloadTester) dropPeer(id string) {
   138  	dl.lock.Lock()
   139  	defer dl.lock.Unlock()
   140  
   141  	delete(dl.peers, id)
   142  	dl.downloader.SnapSyncer.Unregister(id)
   143  	dl.downloader.UnregisterPeer(id)
   144  }
   145  
   146  type downloadTesterPeer struct {
   147  	dl    *downloadTester
   148  	id    string
   149  	chain *core.BlockChain
   150  
   151  	withholdHeaders map[common.Hash]struct{}
   152  }
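
         // Tests simulate a malicious peer by marking headers to withhold before
         // syncing (a sketch of the pattern used by the attack tests below, e.g.
         // testShiftedHeaderAttack):
         //
         //	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
         //	attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}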
   153  
   154  // Head constructs a function to retrieve a peer's current head hash
   155  // and total difficulty.
   156  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   157  	head := dlp.chain.CurrentBlock()
   158  	return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64())
   159  }
   160  
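         // unmarshalRlpHeaders decodes a batch of RLP-encoded headers into typed
         // header objects, panicking on any malformed entry (acceptable in tests).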
   161  func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
   162  	var headers = make([]*types.Header, len(rlpdata))
   163  	for i, data := range rlpdata {
   164  		var h types.Header
   165  		if err := rlp.DecodeBytes(data, &h); err != nil {
   166  			panic(err)
   167  		}
   168  		headers[i] = &h
   169  	}
   170  	return headers
   171  }
   172  
    173  // RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
    174  // origin, associated with a particular peer in the download tester. The returned
    175  // function can be used to retrieve batches of headers from that peer.
   176  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
   177  	// Service the header query via the live handler code
   178  	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
   179  		Origin: eth.HashOrNumber{
   180  			Hash: origin,
   181  		},
   182  		Amount:  uint64(amount),
   183  		Skip:    uint64(skip),
   184  		Reverse: reverse,
   185  	}, nil)
   186  	headers := unmarshalRlpHeaders(rlpHeaders)
   187  	// If a malicious peer is simulated withholding headers, delete them
   188  	for hash := range dlp.withholdHeaders {
   189  		for i, header := range headers {
   190  			if header.Hash() == hash {
   191  				headers = append(headers[:i], headers[i+1:]...)
   192  				break
   193  			}
   194  		}
   195  	}
   196  	hashes := make([]common.Hash, len(headers))
   197  	for i, header := range headers {
   198  		hashes[i] = header.Hash()
   199  	}
   200  	// Deliver the headers to the downloader
   201  	req := &eth.Request{
   202  		Peer: dlp.id,
   203  	}
   204  	res := &eth.Response{
   205  		Req:  req,
   206  		Res:  (*eth.BlockHeadersPacket)(&headers),
   207  		Meta: hashes,
   208  		Time: 1,
   209  		Done: make(chan error, 1), // Ignore the returned status
   210  	}
   211  	go func() {
   212  		sink <- res
   213  	}()
   214  	return req, nil
   215  }
   216  
    217  // RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
    218  // origin, associated with a particular peer in the download tester. The returned
    219  // function can be used to retrieve batches of headers from that peer.
   220  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
   221  	// Service the header query via the live handler code
   222  	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
   223  		Origin: eth.HashOrNumber{
   224  			Number: origin,
   225  		},
   226  		Amount:  uint64(amount),
   227  		Skip:    uint64(skip),
   228  		Reverse: reverse,
   229  	}, nil)
   230  	headers := unmarshalRlpHeaders(rlpHeaders)
   231  	// If a malicious peer is simulated withholding headers, delete them
   232  	for hash := range dlp.withholdHeaders {
   233  		for i, header := range headers {
   234  			if header.Hash() == hash {
   235  				headers = append(headers[:i], headers[i+1:]...)
   236  				break
   237  			}
   238  		}
   239  	}
   240  	hashes := make([]common.Hash, len(headers))
   241  	for i, header := range headers {
   242  		hashes[i] = header.Hash()
   243  	}
   244  	// Deliver the headers to the downloader
   245  	req := &eth.Request{
   246  		Peer: dlp.id,
   247  	}
   248  	res := &eth.Response{
   249  		Req:  req,
   250  		Res:  (*eth.BlockHeadersPacket)(&headers),
   251  		Meta: hashes,
   252  		Time: 1,
   253  		Done: make(chan error, 1), // Ignore the returned status
   254  	}
   255  	go func() {
   256  		sink <- res
   257  	}()
   258  	return req, nil
   259  }
   260  
    261  // RequestBodies constructs a getBlockBodies method associated with a particular
    262  // peer in the download tester. The returned function can be used to retrieve
    263  // batches of block bodies from the requested peer.
   264  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
   265  	blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)
   266  
   267  	bodies := make([]*eth.BlockBody, len(blobs))
   268  	for i, blob := range blobs {
   269  		bodies[i] = new(eth.BlockBody)
   270  		rlp.DecodeBytes(blob, bodies[i])
   271  	}
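         	// Derive the transaction and uncle list hashes for each body; the
         	// downloader matches bodies to their headers using these values,
         	// delivered through the response Meta field.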
   272  	var (
   273  		txsHashes   = make([]common.Hash, len(bodies))
   274  		uncleHashes = make([]common.Hash, len(bodies))
   275  	)
   276  	hasher := trie.NewStackTrie(nil)
   277  	for i, body := range bodies {
   278  		txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
   279  		uncleHashes[i] = types.CalcUncleHash(body.Uncles)
   280  	}
   281  	req := &eth.Request{
   282  		Peer: dlp.id,
   283  	}
   284  	res := &eth.Response{
   285  		Req:  req,
   286  		Res:  (*eth.BlockBodiesPacket)(&bodies),
   287  		Meta: [][]common.Hash{txsHashes, uncleHashes},
   288  		Time: 1,
   289  		Done: make(chan error, 1), // Ignore the returned status
   290  	}
   291  	go func() {
   292  		sink <- res
   293  	}()
   294  	return req, nil
   295  }
   296  
    297  // RequestReceipts constructs a getReceipts method associated with a particular
    298  // peer in the download tester. The returned function can be used to retrieve
    299  // batches of block receipts from the requested peer.
   300  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
   301  	blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)
   302  
   303  	receipts := make([][]*types.Receipt, len(blobs))
   304  	for i, blob := range blobs {
   305  		rlp.DecodeBytes(blob, &receipts[i])
   306  	}
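         	// Derive each receipt list's trie root hash, delivered through the
         	// response Meta field so the downloader can validate the receipts
         	// against their headers.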
   307  	hasher := trie.NewStackTrie(nil)
   308  	hashes = make([]common.Hash, len(receipts))
   309  	for i, receipt := range receipts {
   310  		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
   311  	}
   312  	req := &eth.Request{
   313  		Peer: dlp.id,
   314  	}
   315  	res := &eth.Response{
   316  		Req:  req,
   317  		Res:  (*eth.ReceiptsPacket)(&receipts),
   318  		Meta: hashes,
   319  		Time: 1,
   320  		Done: make(chan error, 1), // Ignore the returned status
   321  	}
   322  	go func() {
   323  		sink <- res
   324  	}()
   325  	return req, nil
   326  }
   327  
   328  // ID retrieves the peer's unique identifier.
   329  func (dlp *downloadTesterPeer) ID() string {
   330  	return dlp.id
   331  }
   332  
   333  // RequestAccountRange fetches a batch of accounts rooted in a specific account
   334  // trie, starting with the origin.
   335  func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   336  	// Create the request and service it
   337  	req := &snap.GetAccountRangePacket{
   338  		ID:     id,
   339  		Root:   root,
   340  		Origin: origin,
   341  		Limit:  limit,
   342  		Bytes:  bytes,
   343  	}
   344  	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)
   345  
    346  	// We need to convert to the non-slim format, so delegate to the packet code
   347  	res := &snap.AccountRangePacket{
   348  		ID:       id,
   349  		Accounts: slimaccs,
   350  		Proof:    proofs,
   351  	}
   352  	hashes, accounts, _ := res.Unpack()
   353  
   354  	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
   355  	return nil
   356  }
   357  
   358  // RequestStorageRanges fetches a batch of storage slots belonging to one or
    359  // more accounts. If slots from only one account are requested, an origin marker
   360  // may also be used to retrieve from there.
   361  func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   362  	// Create the request and service it
   363  	req := &snap.GetStorageRangesPacket{
   364  		ID:       id,
   365  		Accounts: accounts,
   366  		Root:     root,
   367  		Origin:   origin,
   368  		Limit:    limit,
   369  		Bytes:    bytes,
   370  	}
   371  	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)
   372  
    373  	// We need to demultiplex the flat response, so delegate to the packet code
   374  	res := &snap.StorageRangesPacket{
   375  		ID:    id,
   376  		Slots: storage,
   377  		Proof: proofs,
   378  	}
   379  	hashes, slots := res.Unpack()
   380  
   381  	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
   382  	return nil
   383  }
   384  
   385  // RequestByteCodes fetches a batch of bytecodes by hash.
   386  func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   387  	req := &snap.GetByteCodesPacket{
   388  		ID:     id,
   389  		Hashes: hashes,
   390  		Bytes:  bytes,
   391  	}
   392  	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
   393  	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
   394  	return nil
   395  }
   396  
    397  // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
    398  // a specific state trie.
   399  func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
   400  	req := &snap.GetTrieNodesPacket{
   401  		ID:    id,
   402  		Root:  root,
   403  		Paths: paths,
   404  		Bytes: bytes,
   405  	}
   406  	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
   407  	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
   408  	return nil
   409  }
   410  
   411  // Log retrieves the peer's own contextual logger.
   412  func (dlp *downloadTesterPeer) Log() log.Logger {
   413  	return log.New("peer", dlp.id)
   414  }
   415  
   416  // assertOwnChain checks if the local chain contains the correct number of items
   417  // of the various chain components.
   418  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   419  	// Mark this method as a helper to report errors at callsite, not in here
   420  	t.Helper()
   421  
   422  	headers, blocks, receipts := length, length, length
   423  	if tester.downloader.getMode() == LightSync {
   424  		blocks, receipts = 1, 1
   425  	}
   426  	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
   427  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   428  	}
   429  	if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks {
   430  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   431  	}
   432  	if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts {
   433  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   434  	}
   435  }
   436  
   437  func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
   438  func TestCanonicalSynchronisation66Snap(t *testing.T)  { testCanonSync(t, eth.ETH66, SnapSync) }
   439  func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
   440  
   441  func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
   442  	tester := newTester(t)
   443  	defer tester.terminate()
   444  
   445  	// Create a small enough block chain to download
   446  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   447  	tester.newPeer("peer", protocol, chain.blocks[1:])
   448  
   449  	// Synchronise with the peer and make sure all relevant data was retrieved
   450  	if err := tester.sync("peer", nil, mode); err != nil {
   451  		t.Fatalf("failed to synchronise blocks: %v", err)
   452  	}
   453  	assertOwnChain(t, tester, len(chain.blocks))
   454  }
   455  
    456  // Tests that if a large batch of blocks is being downloaded, it is throttled
    457  // until the cached blocks are retrieved.
   458  func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
   459  func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }
   460  
   461  func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
   462  	tester := newTester(t)
   463  	defer tester.terminate()
   464  
   465  	// Create a long block chain to download and the tester
   466  	targetBlocks := len(testChainBase.blocks) - 1
   467  	tester.newPeer("peer", protocol, testChainBase.blocks[1:])
   468  
   469  	// Wrap the importer to allow stepping
   470  	blocked, proceed := uint32(0), make(chan struct{})
   471  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
   472  		atomic.StoreUint32(&blocked, uint32(len(results)))
   473  		<-proceed
   474  	}
   475  	// Start a synchronisation concurrently
   476  	errc := make(chan error, 1)
   477  	go func() {
   478  		errc <- tester.sync("peer", nil, mode)
   479  	}()
   480  	// Iteratively take some blocks, always checking the retrieval count
   481  	for {
    482  		// Check the retrieval count synchronously (the reason for this ugly block)
   483  		tester.lock.RLock()
   484  		retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
   485  		tester.lock.RUnlock()
   486  		if retrieved >= targetBlocks+1 {
   487  			break
   488  		}
   489  		// Wait a bit for sync to throttle itself
   490  		var cached, frozen int
   491  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   492  			time.Sleep(25 * time.Millisecond)
   493  
   494  			tester.lock.Lock()
   495  			tester.downloader.queue.lock.Lock()
   496  			tester.downloader.queue.resultCache.lock.Lock()
   497  			{
   498  				cached = tester.downloader.queue.resultCache.countCompleted()
   499  				frozen = int(atomic.LoadUint32(&blocked))
   500  				retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
   501  			}
   502  			tester.downloader.queue.resultCache.lock.Unlock()
   503  			tester.downloader.queue.lock.Unlock()
   504  			tester.lock.Unlock()
   505  
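         			// Stop waiting once the result cache has filled up (possibly short by
         			// the reorg-protection delay), or once every outstanding block is
         			// accounted for as retrieved, cached or blocked in the import hook.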
   506  			if cached == blockCacheMaxItems ||
   507  				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
   508  				retrieved+cached+frozen == targetBlocks+1 ||
   509  				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   510  				break
   511  			}
   512  		}
   513  		// Make sure we filled up the cache, then exhaust it
   514  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   515  		tester.lock.RLock()
   516  		retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
   517  		tester.lock.RUnlock()
   518  		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   519  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
   520  		}
   521  		// Permit the blocked blocks to import
   522  		if atomic.LoadUint32(&blocked) > 0 {
   523  			atomic.StoreUint32(&blocked, uint32(0))
   524  			proceed <- struct{}{}
   525  		}
   526  	}
   527  	// Check that we haven't pulled more blocks than available
   528  	assertOwnChain(t, tester, targetBlocks+1)
   529  	if err := <-errc; err != nil {
   530  		t.Fatalf("block synchronization failed: %v", err)
   531  	}
   532  }
   533  
   534  // Tests that simple synchronization against a forked chain works correctly. In
   535  // this test common ancestor lookup should *not* be short circuited, and a full
   536  // binary search should be executed.
   537  func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
   538  func TestForkedSync66Snap(t *testing.T)  { testForkedSync(t, eth.ETH66, SnapSync) }
   539  func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
   540  
   541  func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   542  	tester := newTester(t)
   543  	defer tester.terminate()
   544  
   545  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
   546  	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
   547  	tester.newPeer("fork A", protocol, chainA.blocks[1:])
   548  	tester.newPeer("fork B", protocol, chainB.blocks[1:])
   549  	// Synchronise with the peer and make sure all blocks were retrieved
   550  	if err := tester.sync("fork A", nil, mode); err != nil {
   551  		t.Fatalf("failed to synchronise blocks: %v", err)
   552  	}
   553  	assertOwnChain(t, tester, len(chainA.blocks))
   554  
   555  	// Synchronise with the second peer and make sure that fork is pulled too
   556  	if err := tester.sync("fork B", nil, mode); err != nil {
   557  		t.Fatalf("failed to synchronise blocks: %v", err)
   558  	}
   559  	assertOwnChain(t, tester, len(chainB.blocks))
   560  }
   561  
    562  // Tests that synchronising against a much shorter but much heavier fork works
    563  // correctly and is not dropped.
   564  func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
   565  func TestHeavyForkedSync66Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
   566  func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
   567  
   568  func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   569  	tester := newTester(t)
   570  	defer tester.terminate()
   571  
   572  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
   573  	chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
   574  	tester.newPeer("light", protocol, chainA.blocks[1:])
   575  	tester.newPeer("heavy", protocol, chainB.blocks[1:])
   576  
   577  	// Synchronise with the peer and make sure all blocks were retrieved
   578  	if err := tester.sync("light", nil, mode); err != nil {
   579  		t.Fatalf("failed to synchronise blocks: %v", err)
   580  	}
   581  	assertOwnChain(t, tester, len(chainA.blocks))
   582  
   583  	// Synchronise with the second peer and make sure that fork is pulled too
   584  	if err := tester.sync("heavy", nil, mode); err != nil {
   585  		t.Fatalf("failed to synchronise blocks: %v", err)
   586  	}
   587  	assertOwnChain(t, tester, len(chainB.blocks))
   588  }
   589  
   590  // Tests that chain forks are contained within a certain interval of the current
   591  // chain head, ensuring that malicious peers cannot waste resources by feeding
   592  // long dead chains.
   593  func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
   594  func TestBoundedForkedSync66Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
   595  func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
   596  
   597  func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   598  	tester := newTester(t)
   599  	defer tester.terminate()
   600  
   601  	chainA := testChainForkLightA
   602  	chainB := testChainForkLightB
   603  	tester.newPeer("original", protocol, chainA.blocks[1:])
   604  	tester.newPeer("rewriter", protocol, chainB.blocks[1:])
   605  
   606  	// Synchronise with the peer and make sure all blocks were retrieved
   607  	if err := tester.sync("original", nil, mode); err != nil {
   608  		t.Fatalf("failed to synchronise blocks: %v", err)
   609  	}
   610  	assertOwnChain(t, tester, len(chainA.blocks))
   611  
    612  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   613  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   614  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   615  	}
   616  }
   617  
   618  // Tests that chain forks are contained within a certain interval of the current
   619  // chain head for short but heavy forks too. These are a bit special because they
   620  // take different ancestor lookup paths.
   621  func TestBoundedHeavyForkedSync66Full(t *testing.T) {
   622  	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
   623  }
   624  func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
   625  	testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
   626  }
   627  func TestBoundedHeavyForkedSync66Light(t *testing.T) {
   628  	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
   629  }
   630  
   631  func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   632  	tester := newTester(t)
   633  	defer tester.terminate()
   634  
   635  	// Create a long enough forked chain
   636  	chainA := testChainForkLightA
   637  	chainB := testChainForkHeavy
   638  	tester.newPeer("original", protocol, chainA.blocks[1:])
   639  
   640  	// Synchronise with the peer and make sure all blocks were retrieved
   641  	if err := tester.sync("original", nil, mode); err != nil {
   642  		t.Fatalf("failed to synchronise blocks: %v", err)
   643  	}
   644  	assertOwnChain(t, tester, len(chainA.blocks))
   645  
   646  	tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
    647  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   648  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   649  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   650  	}
   651  }
   652  
   653  // Tests that a canceled download wipes all previously accumulated state.
   654  func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
   655  func TestCancel66Snap(t *testing.T)  { testCancel(t, eth.ETH66, SnapSync) }
   656  func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
   657  
   658  func testCancel(t *testing.T, protocol uint, mode SyncMode) {
   659  	tester := newTester(t)
   660  	defer tester.terminate()
   661  
   662  	chain := testChainBase.shorten(MaxHeaderFetch)
   663  	tester.newPeer("peer", protocol, chain.blocks[1:])
   664  
   665  	// Make sure canceling works with a pristine downloader
   666  	tester.downloader.Cancel()
   667  	if !tester.downloader.queue.Idle() {
   668  		t.Errorf("download queue not idle")
   669  	}
   670  	// Synchronise with the peer, but cancel afterwards
   671  	if err := tester.sync("peer", nil, mode); err != nil {
   672  		t.Fatalf("failed to synchronise blocks: %v", err)
   673  	}
   674  	tester.downloader.Cancel()
   675  	if !tester.downloader.queue.Idle() {
   676  		t.Errorf("download queue not idle")
   677  	}
   678  }
   679  
   680  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   681  func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
   682  func TestMultiSynchronisation66Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
   683  func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
   684  
   685  func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
   686  	tester := newTester(t)
   687  	defer tester.terminate()
   688  
   689  	// Create various peers with various parts of the chain
   690  	targetPeers := 8
   691  	chain := testChainBase.shorten(targetPeers * 100)
   692  
   693  	for i := 0; i < targetPeers; i++ {
   694  		id := fmt.Sprintf("peer #%d", i)
   695  		tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:])
   696  	}
   697  	if err := tester.sync("peer #0", nil, mode); err != nil {
   698  		t.Fatalf("failed to synchronise blocks: %v", err)
   699  	}
   700  	assertOwnChain(t, tester, len(chain.blocks))
   701  }
   702  
    703  // Tests that synchronisations behave well in multi-version protocol environments
    704  // and do not wreak havoc on other nodes in the network.
   705  func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
   706  func TestMultiProtoSynchronisation66Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, SnapSync) }
   707  func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
   708  
   709  func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
   710  	tester := newTester(t)
   711  	defer tester.terminate()
   712  
   713  	// Create a small enough block chain to download
   714  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   715  
   716  	// Create peers of every type
   717  	tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
   718  	//tester.newPeer("peer 65", eth.ETH67, chain.blocks[1:)
   719  
   720  	// Synchronise with the requested peer and make sure all blocks were retrieved
   721  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
   722  		t.Fatalf("failed to synchronise blocks: %v", err)
   723  	}
   724  	assertOwnChain(t, tester, len(chain.blocks))
   725  
   726  	// Check that no peers have been dropped off
   727  	for _, version := range []int{66} {
   728  		peer := fmt.Sprintf("peer %d", version)
   729  		if _, ok := tester.peers[peer]; !ok {
   730  			t.Errorf("%s dropped", peer)
   731  		}
   732  	}
   733  }
   734  
    735  // Tests that if a block is empty (e.g. header only), no body request should be
    736  // made; instead, the header should be assembled into a whole block on its own.
   737  func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
   738  func TestEmptyShortCircuit66Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
   739  func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
   740  
   741  func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
   742  	tester := newTester(t)
   743  	defer tester.terminate()
   744  
   745  	// Create a block chain to download
   746  	chain := testChainBase
   747  	tester.newPeer("peer", protocol, chain.blocks[1:])
   748  
   749  	// Instrument the downloader to signal body requests
   750  	bodiesHave, receiptsHave := int32(0), int32(0)
   751  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   752  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
   753  	}
   754  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   755  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
   756  	}
   757  	// Synchronise with the peer and make sure all blocks were retrieved
   758  	if err := tester.sync("peer", nil, mode); err != nil {
   759  		t.Fatalf("failed to synchronise blocks: %v", err)
   760  	}
   761  	assertOwnChain(t, tester, len(chain.blocks))
   762  
   763  	// Validate the number of block bodies that should have been requested
   764  	bodiesNeeded, receiptsNeeded := 0, 0
   765  	for _, block := range chain.blocks[1:] {
   766  		if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   767  			bodiesNeeded++
   768  		}
   769  	}
   770  	for _, block := range chain.blocks[1:] {
   771  		if mode == SnapSync && len(block.Transactions()) > 0 {
   772  			receiptsNeeded++
   773  		}
   774  	}
   775  	if int(bodiesHave) != bodiesNeeded {
   776  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
   777  	}
   778  	if int(receiptsHave) != receiptsNeeded {
   779  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
   780  	}
   781  }
   782  
   783  // Tests that headers are enqueued continuously, preventing malicious nodes from
   784  // stalling the downloader by feeding gapped header chains.
   785  func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
   786  func TestMissingHeaderAttack66Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
   787  func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
   788  
   789  func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   790  	tester := newTester(t)
   791  	defer tester.terminate()
   792  
   793  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   794  
   795  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
   796  	attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}
   797  
   798  	if err := tester.sync("attack", nil, mode); err == nil {
   799  		t.Fatalf("succeeded attacker synchronisation")
   800  	}
   801  	// Synchronise with the valid peer and make sure sync succeeds
   802  	tester.newPeer("valid", protocol, chain.blocks[1:])
   803  	if err := tester.sync("valid", nil, mode); err != nil {
   804  		t.Fatalf("failed to synchronise blocks: %v", err)
   805  	}
   806  	assertOwnChain(t, tester, len(chain.blocks))
   807  }
   808  
   809  // Tests that if requested headers are shifted (i.e. first is missing), the queue
   810  // detects the invalid numbering.
   811  func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
   812  func TestShiftedHeaderAttack66Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
   813  func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
   814  
   815  func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   816  	tester := newTester(t)
   817  	defer tester.terminate()
   818  
   819  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   820  
   821  	// Attempt a full sync with an attacker feeding shifted headers
   822  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
   823  	attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}
   824  
   825  	if err := tester.sync("attack", nil, mode); err == nil {
   826  		t.Fatalf("succeeded attacker synchronisation")
   827  	}
   828  	// Synchronise with the valid peer and make sure sync succeeds
   829  	tester.newPeer("valid", protocol, chain.blocks[1:])
   830  	if err := tester.sync("valid", nil, mode); err != nil {
   831  		t.Fatalf("failed to synchronise blocks: %v", err)
   832  	}
   833  	assertOwnChain(t, tester, len(chain.blocks))
   834  }
   835  
   836  // Tests that upon detecting an invalid header, the recent ones are rolled back
   837  // for various failure scenarios. Afterwards a full sync is attempted to make
   838  // sure no state was corrupted.
   839  func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) }
   840  
   841  func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
   842  	tester := newTester(t)
   843  	defer tester.terminate()
   844  
   845  	// Create a small enough block chain to download
   846  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
   847  	chain := testChainBase.shorten(targetBlocks)
   848  
   849  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
   850  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
   851  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
   852  
   853  	fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
   854  	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
   855  
   856  	if err := tester.sync("fast-attack", nil, mode); err == nil {
   857  		t.Fatalf("succeeded fast attacker synchronisation")
   858  	}
   859  	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
   860  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
   861  	}
   862  	// Attempt to sync with an attacker that feeds junk during the block import phase.
   863  	// This should result in both the last fsHeaderSafetyNet number of headers being
   864  	// rolled back, and also the pivot point being reverted to a non-block status.
   865  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
   866  
   867  	blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
   868  	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
   869  	blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
   870  
   871  	if err := tester.sync("block-attack", nil, mode); err == nil {
   872  		t.Fatalf("succeeded block attacker synchronisation")
   873  	}
   874  	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
   875  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
   876  	}
   877  	if mode == SnapSync {
   878  		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
   879  			t.Errorf("fast sync pivot block #%d not rolled back", head)
   880  		}
   881  	}
    882  	// Attempt to sync with an attacker that withholds promised blocks after the
    883  	// fast sync pivot point. This could be an attempt to leave the node with a bad
    884  	// but already imported pivot block.
   885  	withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])
   886  
   887  	tester.downloader.syncInitHook = func(uint64, uint64) {
   888  		for i := missing; i < len(chain.blocks); i++ {
   889  			withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
   890  		}
   891  		tester.downloader.syncInitHook = nil
   892  	}
   893  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
   894  		t.Fatalf("succeeded withholding attacker synchronisation")
   895  	}
   896  	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
   897  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
   898  	}
   899  	if mode == SnapSync {
   900  		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
   901  			t.Errorf("fast sync pivot block #%d not rolled back", head)
   902  		}
   903  	}
   904  	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
   905  	// should also disable fast syncing for this process, verify that we did a fresh full
   906  	// sync. Note, we can't assert anything about the receipts since we won't purge the
   907  	// database of them, hence we can't use assertOwnChain.
   908  	tester.newPeer("valid", protocol, chain.blocks[1:])
   909  	if err := tester.sync("valid", nil, mode); err != nil {
   910  		t.Fatalf("failed to synchronise blocks: %v", err)
   911  	}
   912  	assertOwnChain(t, tester, len(chain.blocks))
   913  }
   914  
   915  // Tests that a peer advertising a high TD doesn't get to stall the downloader
   916  // afterwards by not sending any useful hashes.
   917  func TestHighTDStarvationAttack66Full(t *testing.T) {
   918  	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
   919  }
   920  func TestHighTDStarvationAttack66Snap(t *testing.T) {
   921  	testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
   922  }
   923  func TestHighTDStarvationAttack66Light(t *testing.T) {
   924  	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
   925  }
   926  
   927  func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
   928  	tester := newTester(t)
   929  	defer tester.terminate()
   930  
   931  	chain := testChainBase.shorten(1)
   932  	tester.newPeer("attack", protocol, chain.blocks[1:])
   933  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
   934  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
   935  	}
   936  }
   937  
   938  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
   939  func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
   940  
   941  func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
   942  	// Define the disconnection requirement for individual hash fetch errors
   943  	tests := []struct {
   944  		result error
   945  		drop   bool
   946  	}{
   947  		{nil, false},                        // Sync succeeded, all is well
   948  		{errBusy, false},                    // Sync is already in progress, no problem
   949  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
   950  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
   951  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
   952  		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
   953  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
   954  		{errTimeout, true},                  // No hashes received in due time, drop the peer
   955  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
   956  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
   957  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
   958  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
   959  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
   960  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
   961  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
   962  	}
   963  	// Run the tests and check disconnection status
   964  	tester := newTester(t)
   965  	defer tester.terminate()
   966  	chain := testChainBase.shorten(1)
   967  
   968  	for i, tt := range tests {
   969  		// Register a new peer and ensure its presence
   970  		id := fmt.Sprintf("test %d", i)
   971  		tester.newPeer(id, protocol, chain.blocks[1:])
   972  		if _, ok := tester.peers[id]; !ok {
   973  			t.Fatalf("test %d: registered peer not found", i)
   974  		}
   975  		// Simulate a synchronisation and check the required result
   976  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
   977  
   978  		tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
   979  		if _, ok := tester.peers[id]; !ok != tt.drop {
   980  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
   981  		}
   982  	}
   983  }
   984  
   985  // Tests that synchronisation progress (origin block number, current block number
   986  // and highest block number) is tracked and updated correctly.
   987  func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
   988  func TestSyncProgress66Snap(t *testing.T)  { testSyncProgress(t, eth.ETH66, SnapSync) }
   989  func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
   990  
   991  func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
   992  	tester := newTester(t)
   993  	defer tester.terminate()
   994  
   995  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   996  
   997  	// Set a sync init hook to catch progress changes
   998  	starting := make(chan struct{})
   999  	progress := make(chan struct{})
  1000  
  1001  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1002  		starting <- struct{}{}
  1003  		<-progress
  1004  	}
  1005  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1006  
  1007  	// Synchronise half the blocks and check initial progress
  1008  	tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:])
  1009  	pending := new(sync.WaitGroup)
  1010  	pending.Add(1)
  1011  
  1012  	go func() {
  1013  		defer pending.Done()
  1014  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1015  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1016  		}
  1017  	}()
  1018  	<-starting
  1019  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1020  		HighestBlock: uint64(len(chain.blocks)/2 - 1),
  1021  	})
  1022  	progress <- struct{}{}
  1023  	pending.Wait()
  1024  
  1025  	// Synchronise all the blocks and check continuation progress
  1026  	tester.newPeer("peer-full", protocol, chain.blocks[1:])
  1027  	pending.Add(1)
  1028  	go func() {
  1029  		defer pending.Done()
  1030  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1031  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1032  		}
  1033  	}()
  1034  	<-starting
  1035  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1036  		StartingBlock: uint64(len(chain.blocks)/2 - 1),
  1037  		CurrentBlock:  uint64(len(chain.blocks)/2 - 1),
  1038  		HighestBlock:  uint64(len(chain.blocks) - 1),
  1039  	})
  1040  
  1041  	// Check final progress after successful sync
  1042  	progress <- struct{}{}
  1043  	pending.Wait()
  1044  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1045  		StartingBlock: uint64(len(chain.blocks)/2 - 1),
  1046  		CurrentBlock:  uint64(len(chain.blocks) - 1),
  1047  		HighestBlock:  uint64(len(chain.blocks) - 1),
  1048  	})
  1049  }
  1050  
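         // checkProgress retrieves the downloader's current sync progress and compares
         // its block-number fields against the expected snapshot.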
  1051  func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
  1052  	// Mark this method as a helper to report errors at callsite, not in here
  1053  	t.Helper()
  1054  
  1055  	p := d.Progress()
  1056  	if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
  1057  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1058  	}
  1059  }
  1060  
  1061  // Tests that synchronisation progress (origin block number and highest block
  1062  // number) is tracked and updated correctly in case of a fork (or manual head
   1063  // reversal).
  1064  func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
  1065  func TestForkedSyncProgress66Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
  1066  func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
  1067  
  1068  func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1069  	tester := newTester(t)
  1070  	defer tester.terminate()
  1071  
  1072  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
  1073  	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
  1074  
  1075  	// Set a sync init hook to catch progress changes
  1076  	starting := make(chan struct{})
  1077  	progress := make(chan struct{})
  1078  
  1079  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1080  		starting <- struct{}{}
  1081  		<-progress
  1082  	}
  1083  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1084  
  1085  	// Synchronise with one of the forks and check progress
  1086  	tester.newPeer("fork A", protocol, chainA.blocks[1:])
  1087  	pending := new(sync.WaitGroup)
  1088  	pending.Add(1)
  1089  	go func() {
  1090  		defer pending.Done()
  1091  		if err := tester.sync("fork A", nil, mode); err != nil {
  1092  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1093  		}
  1094  	}()
  1095  	<-starting
  1096  
  1097  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1098  		HighestBlock: uint64(len(chainA.blocks) - 1),
  1099  	})
  1100  	progress <- struct{}{}
  1101  	pending.Wait()
  1102  
  1103  	// Simulate a successful sync above the fork
  1104  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1105  
  1106  	// Synchronise with the second fork and check progress resets
  1107  	tester.newPeer("fork B", protocol, chainB.blocks[1:])
  1108  	pending.Add(1)
  1109  	go func() {
  1110  		defer pending.Done()
  1111  		if err := tester.sync("fork B", nil, mode); err != nil {
  1112  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1113  		}
  1114  	}()
  1115  	<-starting
  1116  	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
  1117  		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
  1118  		CurrentBlock:  uint64(len(chainA.blocks) - 1),
  1119  		HighestBlock:  uint64(len(chainB.blocks) - 1),
  1120  	})
  1121  
  1122  	// Check final progress after successful sync
  1123  	progress <- struct{}{}
  1124  	pending.Wait()
  1125  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1126  		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
  1127  		CurrentBlock:  uint64(len(chainB.blocks) - 1),
  1128  		HighestBlock:  uint64(len(chainB.blocks) - 1),
  1129  	})
  1130  }
  1131  
  1132  // Tests that if synchronisation is aborted due to some failure, then the progress
  1133  // origin is not updated in the next sync cycle, as it should be considered the
  1134  // continuation of the previous sync and not a new instance.
  1135  func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
  1136  func TestFailedSyncProgress66Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
  1137  func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
  1138  
  1139  func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1140  	tester := newTester(t)
  1141  	defer tester.terminate()
  1142  
  1143  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1144  
  1145  	// Set a sync init hook to catch progress changes
  1146  	starting := make(chan struct{})
  1147  	progress := make(chan struct{})
  1148  
  1149  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1150  		starting <- struct{}{}
  1151  		<-progress
  1152  	}
  1153  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1154  
  1155  	// Attempt a full sync with a faulty peer
  1156  	missing := len(chain.blocks)/2 - 1
  1157  
  1158  	faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
  1159  	faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
  1160  
  1161  	pending := new(sync.WaitGroup)
  1162  	pending.Add(1)
  1163  	go func() {
  1164  		defer pending.Done()
  1165  		if err := tester.sync("faulty", nil, mode); err == nil {
  1166  			panic("succeeded faulty synchronisation")
  1167  		}
  1168  	}()
  1169  	<-starting
  1170  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1171  		HighestBlock: uint64(len(chain.blocks) - 1),
  1172  	})
  1173  	progress <- struct{}{}
  1174  	pending.Wait()
  1175  	afterFailedSync := tester.downloader.Progress()
  1176  
   1177  	// Synchronise with a good peer and check that the progress origin remains
   1178  	// the same after a failure
  1179  	tester.newPeer("valid", protocol, chain.blocks[1:])
  1180  	pending.Add(1)
  1181  	go func() {
  1182  		defer pending.Done()
  1183  		if err := tester.sync("valid", nil, mode); err != nil {
  1184  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1185  		}
  1186  	}()
  1187  	<-starting
  1188  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1189  
  1190  	// Check final progress after successful sync
  1191  	progress <- struct{}{}
  1192  	pending.Wait()
  1193  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1194  		CurrentBlock: uint64(len(chain.blocks) - 1),
  1195  		HighestBlock: uint64(len(chain.blocks) - 1),
  1196  	})
  1197  }
  1198  
  1199  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1200  // the progress height is successfully reduced at the next sync invocation.
  1201  func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
  1202  func TestFakedSyncProgress66Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
  1203  func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
  1204  
  1205  func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1206  	tester := newTester(t)
  1207  	defer tester.terminate()
  1208  
  1209  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1210  
  1211  	// Set a sync init hook to catch progress changes
  1212  	starting := make(chan struct{})
  1213  	progress := make(chan struct{})
  1214  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1215  		starting <- struct{}{}
  1216  		<-progress
  1217  	}
  1218  	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1219  
  1220  	// Create and sync with an attacker that promises a higher chain than available.
  1221  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
  1222  	numMissing := 5
  1223  	for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
  1224  		attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
  1225  	}
  1226  	pending := new(sync.WaitGroup)
  1227  	pending.Add(1)
  1228  	go func() {
  1229  		defer pending.Done()
  1230  		if err := tester.sync("attack", nil, mode); err == nil {
  1231  			panic("succeeded attacker synchronisation")
  1232  		}
  1233  	}()
  1234  	<-starting
  1235  	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1236  		HighestBlock: uint64(len(chain.blocks) - 1),
  1237  	})
  1238  	progress <- struct{}{}
  1239  	pending.Wait()
  1240  	afterFailedSync := tester.downloader.Progress()
  1241  
  1242  	// Synchronise with a good peer and check that the progress height has been reduced to
  1243  	// the true value.
  1244  	validChain := chain.shorten(len(chain.blocks) - numMissing)
  1245  	tester.newPeer("valid", protocol, validChain.blocks[1:])
  1246  	pending.Add(1)
  1247  
  1248  	go func() {
  1249  		defer pending.Done()
  1250  		if err := tester.sync("valid", nil, mode); err != nil {
  1251  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1252  		}
  1253  	}()
  1254  	<-starting
  1255  	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1256  		CurrentBlock: afterFailedSync.CurrentBlock,
  1257  		HighestBlock: uint64(len(validChain.blocks) - 1),
  1258  	})
  1259  	// Check final progress after successful sync.
  1260  	progress <- struct{}{}
  1261  	pending.Wait()
  1262  	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1263  		CurrentBlock: uint64(len(validChain.blocks) - 1),
  1264  		HighestBlock: uint64(len(validChain.blocks) - 1),
  1265  	})
  1266  }
  1267  
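        // TestRemoteHeaderRequestSpan checks that calculateRequestSpan produces sane
        // skeleton header requests (start, count and spacing) for various combinations
        // of local and remote chain heights.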
  1268  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1269  	testCases := []struct {
  1270  		remoteHeight uint64
  1271  		localHeight  uint64
  1272  		expected     []int
  1273  	}{
  1274  		// Remote is way higher. We should ask for the remote head and go backwards
  1275  		{1500, 1000,
  1276  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1277  		},
  1278  		{15000, 13006,
  1279  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1280  		},
  1281  		// Remote is pretty close to us. We don't have to fetch as many
  1282  		{1200, 1150,
  1283  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1284  		},
  1285  		// Remote is equal to us (so on a fork with higher td)
  1286  		// We should get the closest couple of ancestors
  1287  		{1500, 1500,
  1288  			[]int{1497, 1499},
  1289  		},
  1290  		// We're higher than the remote! Odd
  1291  		{1000, 1500,
  1292  			[]int{997, 999},
  1293  		},
  1294  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1295  		{0, 1500,
  1296  			[]int{0, 2},
  1297  		},
  1298  		{6000000, 0,
  1299  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1300  		},
  1301  		{0, 0,
  1302  			[]int{0, 2},
  1303  		},
  1304  	}
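        	// reqs expands a (from, count, span) triple into the explicit list of
        	// header numbers such a request would fetch, stepping span+1 at a time.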
  1305  	reqs := func(from, count, span int) []int {
  1306  		var r []int
  1307  		num := from
  1308  		for len(r) < count {
  1309  			r = append(r, num)
  1310  			num += span + 1
  1311  		}
  1312  		return r
  1313  	}
  1314  	for i, tt := range testCases {
  1315  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1316  		data := reqs(int(from), count, span)
  1317  
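        		// The last header number requested must line up with the reported max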
  1318  		if max != uint64(data[len(data)-1]) {
  1319  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1320  		}
  1321  		failed := false
  1322  		if len(data) != len(tt.expected) {
  1323  			failed = true
  1324  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1325  		} else {
  1326  			for j, n := range data {
  1327  				if n != tt.expected[j] {
  1328  					failed = true
  1329  					break
  1330  				}
  1331  			}
  1332  		}
  1333  		if failed {
  1334  			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
  1335  			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
  1336  			t.Logf("got: %v\n", res)
  1337  			t.Logf("exp: %v\n", exp)
  1338  			t.Errorf("test %d: wrong values", i)
  1339  		}
  1340  	}
  1341  }
  1342  
  1343  // Tests that peers below a pre-configured checkpoint block are prevented from
  1344  // being snap- or light-synced from, avoiding potential cheap eclipse attacks.
  1345  func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
  1346  func TestCheckpointEnforcement66Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, SnapSync) }
  1347  func TestCheckpointEnforcement66Light(t *testing.T) {
  1348  	testCheckpointEnforcement(t, eth.ETH66, LightSync)
  1349  }
  1350  
  1351  func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
  1352  	// Create a new tester with a particular hard coded checkpoint block
  1353  	tester := newTester(t)
  1354  	defer tester.terminate()
  1355  
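        	// Cap the peer's chain just below the checkpoint: snap and light sync must
        	// reject such a peer as unsynced, while full sync may still proceed.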
  1356  	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
  1357  	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
  1358  
  1359  	// Attempt to sync with the peer and validate the result
  1360  	tester.newPeer("peer", protocol, chain.blocks[1:])
  1361  
  1362  	var expect error
  1363  	if mode == SnapSync || mode == LightSync {
  1364  		expect = errUnsyncedPeer
  1365  	}
  1366  	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
  1367  		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
  1368  	}
  1369  	if mode == SnapSync || mode == LightSync {
  1370  		assertOwnChain(t, tester, 1)
  1371  	} else {
  1372  		assertOwnChain(t, tester, len(chain.blocks))
  1373  	}
  1374  }
  1375  
  1376  // Tests that beacon sync can be launched both from a fresh genesis state and
  1377  // from various stages of a partially synced local chain.
  1378  func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) }
  1379  func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) }
  1380  
  1381  func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
  1382  	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
  1383  
  1384  	var cases = []struct {
  1385  		name  string // The name of the testing scenario
  1386  		local int    // The length of the local chain (canonical chain assumed); 0 means genesis is the head
  1387  	}{
  1388  		{name: "Beacon sync since genesis", local: 0},
  1389  		{name: "Beacon sync with short local chain", local: 1},
  1390  		{name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
  1391  		{name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
  1392  	}
  1393  	for _, c := range cases {
  1394  		t.Run(c.name, func(t *testing.T) {
  1395  			success := make(chan struct{})
  1396  			tester := newTesterWithNotification(t, func() {
  1397  				close(success)
  1398  			})
  1399  			defer tester.terminate()
  1400  
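        			// Create a sizeable chain to sync against, capped just below the
        			// downloader's block cache limit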
  1401  			chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1402  			tester.newPeer("peer", protocol, chain.blocks[1:])
  1403  
  1404  			// Build the local chain segment if it's required
  1405  			if c.local > 0 {
  1406  				tester.chain.InsertChain(chain.blocks[1 : c.local+1])
  1407  			}
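        			// Start a beacon sync with the head of the peer's chain as the announced
        			// target, then wait for the completion notification (or time out)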
  1408  			if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header()); err != nil {
  1409  				t.Fatalf("Failed to beacon sync chain %v: %v", c.name, err)
  1410  			}
  1411  			select {
  1412  			case <-success:
  1413  				// Ok, downloader fully cancelled after sync cycle
  1414  				if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != len(chain.blocks) {
  1415  					t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
  1416  				}
  1417  			case <-time.After(3 * time.Second):
  1418  				t.Fatalf("Failed to sync chain in three seconds")
  1419  			}
  1420  		})
  1421  	}
  1422  }