github.com/theQRL/go-zond@v0.1.1/zond/downloader/downloader_test.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"fmt"
    21  	"math/big"
    22  	"os"
    23  	"strings"
    24  	"sync"
    25  	"sync/atomic"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/theQRL/go-zond"
    30  	"github.com/theQRL/go-zond/common"
    31  	"github.com/theQRL/go-zond/consensus/ethash"
    32  	"github.com/theQRL/go-zond/core"
    33  	"github.com/theQRL/go-zond/core/rawdb"
    34  	"github.com/theQRL/go-zond/core/types"
    35  	"github.com/theQRL/go-zond/core/vm"
    36  	"github.com/theQRL/go-zond/event"
    37  	"github.com/theQRL/go-zond/log"
    38  	"github.com/theQRL/go-zond/params"
    39  	"github.com/theQRL/go-zond/rlp"
    40  	"github.com/theQRL/go-zond/trie"
    41  	"github.com/theQRL/go-zond/zond/protocols/snap"
    42  	"github.com/theQRL/go-zond/zond/protocols/zond"
    43  )
    44  
    45  // downloadTester is a test simulator for mocking out the local block chain.
    46  type downloadTester struct {
    47  	freezer    string
    48  	chain      *core.BlockChain
    49  	downloader *Downloader
    50  
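        	// peers tracks the simulated remote peers registered with the downloader,
        	// keyed by peer id and protected by lock below.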
    51  	peers map[string]*downloadTesterPeer
    52  	lock  sync.RWMutex
    53  }
    54  
    55  // newTester creates a new downloader test mocker.
    56  func newTester(t *testing.T) *downloadTester {
    57  	return newTesterWithNotification(t, nil)
    58  }
    59  
    60  // newTesterWithNotification creates a new downloader test mocker with an optional success callback.
    61  func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
    62  	freezer := t.TempDir()
    63  	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
    64  	if err != nil {
    65  		panic(err)
    66  	}
    67  	t.Cleanup(func() {
    68  		db.Close()
    69  	})
    70  	gspec := &core.Genesis{
    71  		Config:  params.TestChainConfig,
    72  		Alloc:   core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
    73  		BaseFee: big.NewInt(params.InitialBaseFee),
    74  	}
    75  	chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
    76  	if err != nil {
    77  		panic(err)
    78  	}
    79  	tester := &downloadTester{
    80  		freezer: freezer,
    81  		chain:   chain,
    82  		peers:   make(map[string]*downloadTesterPeer),
    83  	}
    84  	tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success)
    85  	return tester
    86  }
    87  
    88  // terminate aborts any operations on the embedded downloader and releases all
    89  // held resources.
    90  func (dl *downloadTester) terminate() {
    91  	dl.downloader.Terminate()
    92  	dl.chain.Stop()
    93  
    94  	os.RemoveAll(dl.freezer)
    95  }
    96  
    97  // sync starts synchronizing with a remote peer, blocking until it completes.
    98  func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
    99  	head := dl.peers[id].chain.CurrentBlock()
   100  	if td == nil {
   101  		// If no particular TD was requested, load from the peer's blockchain
   102  		td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64())
   103  	}
   104  	// Synchronise with the chosen peer and ensure proper cleanup afterwards
   105  	err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
   106  	select {
   107  	case <-dl.downloader.cancelCh:
   108  		// Ok, downloader fully cancelled after sync cycle
   109  	default:
   110  		// Downloader is still accepting packets, which can tie up a peer
   111  		panic("downloader active post sync cycle") // panic will be caught by tester
   112  	}
   113  	return err
   114  }
   115  
   116  // newPeer registers a new block download source into the downloader.
   117  func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
   118  	dl.lock.Lock()
   119  	defer dl.lock.Unlock()
   120  
   121  	peer := &downloadTesterPeer{
   122  		dl:              dl,
   123  		id:              id,
   124  		chain:           newTestBlockchain(blocks),
   125  		withholdHeaders: make(map[common.Hash]struct{}),
   126  	}
   127  	dl.peers[id] = peer
   128  
   129  	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
   130  		panic(err)
   131  	}
   132  	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
   133  		panic(err)
   134  	}
   135  	return peer
   136  }
   137  
   138  // dropPeer simulates a hard peer removal from the connection pool.
   139  func (dl *downloadTester) dropPeer(id string) {
   140  	dl.lock.Lock()
   141  	defer dl.lock.Unlock()
   142  
   143  	delete(dl.peers, id)
   144  	dl.downloader.SnapSyncer.Unregister(id)
   145  	dl.downloader.UnregisterPeer(id)
   146  }
   147  
   148  type downloadTesterPeer struct {
   149  	dl    *downloadTester
   150  	id    string
   151  	chain *core.BlockChain
   152  
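        	// withholdHeaders lists header hashes this peer refuses to serve, used to
        	// simulate malicious or faulty peers in the header retrieval tests.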
   153  	withholdHeaders map[common.Hash]struct{}
   154  }
   155  
   156  // Head retrieves the peer's current head hash and total
   157  // difficulty.
   158  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   159  	head := dlp.chain.CurrentBlock()
   160  	return head.Hash(), dlp.chain.GetTd(head.Hash(), head.Number.Uint64())
   161  }
   162  
   163  func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
   164  	var headers = make([]*types.Header, len(rlpdata))
   165  	for i, data := range rlpdata {
   166  		var h types.Header
   167  		if err := rlp.DecodeBytes(data, &h); err != nil {
   168  			panic(err)
   169  		}
   170  		headers[i] = &h
   171  	}
   172  	return headers
   173  }
   174  
   175  // RequestHeadersByHash services a GetBlockHeaders request based on a hashed
   176  // origin, associated with a particular peer in the download tester. The
   177  // retrieved headers are delivered asynchronously on the given sink channel.
   178  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *zond.Response) (*zond.Request, error) {
   179  	// Service the header query via the live handler code
   180  	rlpHeaders := zond.ServiceGetBlockHeadersQuery(dlp.chain, &zond.GetBlockHeadersPacket{
   181  		Origin: zond.HashOrNumber{
   182  			Hash: origin,
   183  		},
   184  		Amount:  uint64(amount),
   185  		Skip:    uint64(skip),
   186  		Reverse: reverse,
   187  	}, nil)
   188  	headers := unmarshalRlpHeaders(rlpHeaders)
   189  	// If the peer simulates a malicious actor withholding headers, delete them
   190  	for hash := range dlp.withholdHeaders {
   191  		for i, header := range headers {
   192  			if header.Hash() == hash {
   193  				headers = append(headers[:i], headers[i+1:]...)
   194  				break
   195  			}
   196  		}
   197  	}
   198  	hashes := make([]common.Hash, len(headers))
   199  	for i, header := range headers {
   200  		hashes[i] = header.Hash()
   201  	}
   202  	// Deliver the headers to the downloader
   203  	req := &zond.Request{
   204  		Peer: dlp.id,
   205  	}
   206  	res := &zond.Response{
   207  		Req:  req,
   208  		Res:  (*zond.BlockHeadersPacket)(&headers),
   209  		Meta: hashes,
   210  		Time: 1,
   211  		Done: make(chan error, 1), // Ignore the returned status
   212  	}
   213  	go func() {
   214  		sink <- res
   215  	}()
   216  	return req, nil
   217  }
   218  
   219  // RequestHeadersByNumber services a GetBlockHeaders request based on a numbered
   220  // origin, associated with a particular peer in the download tester. The
   221  // retrieved headers are delivered asynchronously on the given sink channel.
   222  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *zond.Response) (*zond.Request, error) {
   223  	// Service the header query via the live handler code
   224  	rlpHeaders := zond.ServiceGetBlockHeadersQuery(dlp.chain, &zond.GetBlockHeadersPacket{
   225  		Origin: zond.HashOrNumber{
   226  			Number: origin,
   227  		},
   228  		Amount:  uint64(amount),
   229  		Skip:    uint64(skip),
   230  		Reverse: reverse,
   231  	}, nil)
   232  	headers := unmarshalRlpHeaders(rlpHeaders)
   233  	// If the peer simulates a malicious actor withholding headers, delete them
   234  	for hash := range dlp.withholdHeaders {
   235  		for i, header := range headers {
   236  			if header.Hash() == hash {
   237  				headers = append(headers[:i], headers[i+1:]...)
   238  				break
   239  			}
   240  		}
   241  	}
   242  	hashes := make([]common.Hash, len(headers))
   243  	for i, header := range headers {
   244  		hashes[i] = header.Hash()
   245  	}
   246  	// Deliver the headers to the downloader
   247  	req := &zond.Request{
   248  		Peer: dlp.id,
   249  	}
   250  	res := &zond.Response{
   251  		Req:  req,
   252  		Res:  (*zond.BlockHeadersPacket)(&headers),
   253  		Meta: hashes,
   254  		Time: 1,
   255  		Done: make(chan error, 1), // Ignore the returned status
   256  	}
   257  	go func() {
   258  		sink <- res
   259  	}()
   260  	return req, nil
   261  }
   262  
   263  // RequestBodies services a GetBlockBodies request associated with a particular
   264  // peer in the download tester. The retrieved block bodies are delivered
   265  // asynchronously on the given sink channel.
   266  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *zond.Response) (*zond.Request, error) {
   267  	blobs := zond.ServiceGetBlockBodiesQuery(dlp.chain, hashes)
   268  
   269  	bodies := make([]*zond.BlockBody, len(blobs))
   270  	for i, blob := range blobs {
   271  		bodies[i] = new(zond.BlockBody)
   272  		rlp.DecodeBytes(blob, bodies[i])
   273  	}
   274  	var (
   275  		txsHashes        = make([]common.Hash, len(bodies))
   276  		uncleHashes      = make([]common.Hash, len(bodies))
   277  		withdrawalHashes = make([]common.Hash, len(bodies))
   278  	)
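        	// Derive the transaction and uncle hashes of every body so they can be handed
        	// to the downloader as response metadata alongside the bodies themselves.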
   279  	hasher := trie.NewStackTrie(nil)
   280  	for i, body := range bodies {
   281  		txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
   282  		uncleHashes[i] = types.CalcUncleHash(body.Uncles)
   283  	}
   284  	req := &zond.Request{
   285  		Peer: dlp.id,
   286  	}
   287  	res := &zond.Response{
   288  		Req:  req,
   289  		Res:  (*zond.BlockBodiesPacket)(&bodies),
   290  		Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
   291  		Time: 1,
   292  		Done: make(chan error, 1), // Ignore the returned status
   293  	}
   294  	go func() {
   295  		sink <- res
   296  	}()
   297  	return req, nil
   298  }
   299  
   300  // RequestReceipts services a GetReceipts request associated with a particular
   301  // peer in the download tester. The retrieved block receipts are delivered
   302  // asynchronously on the given sink channel.
   303  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *zond.Response) (*zond.Request, error) {
   304  	blobs := zond.ServiceGetReceiptsQuery(dlp.chain, hashes)
   305  
   306  	receipts := make([][]*types.Receipt, len(blobs))
   307  	for i, blob := range blobs {
   308  		rlp.DecodeBytes(blob, &receipts[i])
   309  	}
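        	// Derive the hash of every receipt list so it can be handed to the downloader
        	// as response metadata alongside the receipts themselves.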
   310  	hasher := trie.NewStackTrie(nil)
   311  	hashes = make([]common.Hash, len(receipts))
   312  	for i, receipt := range receipts {
   313  		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
   314  	}
   315  	req := &zond.Request{
   316  		Peer: dlp.id,
   317  	}
   318  	res := &zond.Response{
   319  		Req:  req,
   320  		Res:  (*zond.ReceiptsPacket)(&receipts),
   321  		Meta: hashes,
   322  		Time: 1,
   323  		Done: make(chan error, 1), // Ignore the returned status
   324  	}
   325  	go func() {
   326  		sink <- res
   327  	}()
   328  	return req, nil
   329  }
   330  
   331  // ID retrieves the peer's unique identifier.
   332  func (dlp *downloadTesterPeer) ID() string {
   333  	return dlp.id
   334  }
   335  
   336  // RequestAccountRange fetches a batch of accounts rooted in a specific account
   337  // trie, starting with the origin.
   338  func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   339  	// Create the request and service it
   340  	req := &snap.GetAccountRangePacket{
   341  		ID:     id,
   342  		Root:   root,
   343  		Origin: origin,
   344  		Limit:  limit,
   345  		Bytes:  bytes,
   346  	}
   347  	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)
   348  
   349  	// We need to convert to non-slim format, delegate to the packet code
   350  	res := &snap.AccountRangePacket{
   351  		ID:       id,
   352  		Accounts: slimaccs,
   353  		Proof:    proofs,
   354  	}
   355  	hashes, accounts, _ := res.Unpack()
   356  
   357  	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
   358  	return nil
   359  }
   360  
   361  // RequestStorageRanges fetches a batch of storage slots belonging to one or
   362  // more accounts. If slots from only one account are requested, an origin marker
   363  // may also be used to retrieve from there.
   364  func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   365  	// Create the request and service it
   366  	req := &snap.GetStorageRangesPacket{
   367  		ID:       id,
   368  		Accounts: accounts,
   369  		Root:     root,
   370  		Origin:   origin,
   371  		Limit:    limit,
   372  		Bytes:    bytes,
   373  	}
   374  	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)
   375  
   376  	// We need to demultiplex the response, delegate to the packet code
   377  	res := &snap.StorageRangesPacket{
   378  		ID:    id,
   379  		Slots: storage,
   380  		Proof: proofs,
   381  	}
   382  	hashes, slots := res.Unpack()
   383  
   384  	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
   385  	return nil
   386  }
   387  
   388  // RequestByteCodes fetches a batch of bytecodes by hash.
   389  func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   390  	req := &snap.GetByteCodesPacket{
   391  		ID:     id,
   392  		Hashes: hashes,
   393  		Bytes:  bytes,
   394  	}
   395  	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
   396  	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
   397  	return nil
   398  }
   399  
   400  // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
   401  // a specific state trie.
   402  func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
   403  	req := &snap.GetTrieNodesPacket{
   404  		ID:    id,
   405  		Root:  root,
   406  		Paths: paths,
   407  		Bytes: bytes,
   408  	}
   409  	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
   410  	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
   411  	return nil
   412  }
   413  
   414  // Log retrieves the peer's own contextual logger.
   415  func (dlp *downloadTesterPeer) Log() log.Logger {
   416  	return log.New("peer", dlp.id)
   417  }
   418  
   419  // assertOwnChain checks if the local chain contains the correct number of items
   420  // of the various chain components.
   421  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   422  	// Mark this method as a helper to report errors at callsite, not in here
   423  	t.Helper()
   424  
   425  	headers, blocks, receipts := length, length, length
   426  	if tester.downloader.getMode() == LightSync {
   427  		blocks, receipts = 1, 1
   428  	}
   429  	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
   430  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   431  	}
   432  	if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != blocks {
   433  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   434  	}
   435  	if rs := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1; rs != receipts {
   436  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   437  	}
   438  }
   439  
   440  func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, zond.ETH66, FullSync) }
   441  func TestCanonicalSynchronisation66Snap(t *testing.T)  { testCanonSync(t, zond.ETH66, SnapSync) }
   442  func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, zond.ETH66, LightSync) }
   443  func TestCanonicalSynchronisation67Full(t *testing.T)  { testCanonSync(t, zond.ETH67, FullSync) }
   444  func TestCanonicalSynchronisation67Snap(t *testing.T)  { testCanonSync(t, zond.ETH67, SnapSync) }
   445  func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, zond.ETH67, LightSync) }
   446  
   447  func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
   448  	tester := newTester(t)
   449  	defer tester.terminate()
   450  
   451  	// Create a small enough block chain to download
   452  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   453  	tester.newPeer("peer", protocol, chain.blocks[1:])
   454  
   455  	// Synchronise with the peer and make sure all relevant data was retrieved
   456  	if err := tester.sync("peer", nil, mode); err != nil {
   457  		t.Fatalf("failed to synchronise blocks: %v", err)
   458  	}
   459  	assertOwnChain(t, tester, len(chain.blocks))
   460  }
   461  
   462  // Tests that if a large batch of blocks is being downloaded, it is throttled
   463  // until the cached blocks are retrieved.
   464  func TestThrottling66Full(t *testing.T) { testThrottling(t, zond.ETH66, FullSync) }
   465  func TestThrottling66Snap(t *testing.T) { testThrottling(t, zond.ETH66, SnapSync) }
   466  func TestThrottling67Full(t *testing.T) { testThrottling(t, zond.ETH67, FullSync) }
   467  func TestThrottling67Snap(t *testing.T) { testThrottling(t, zond.ETH67, SnapSync) }
   468  
   469  func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
   470  	tester := newTester(t)
   471  	defer tester.terminate()
   472  
   473  	// Create a long block chain to download and the tester
   474  	targetBlocks := len(testChainBase.blocks) - 1
   475  	tester.newPeer("peer", protocol, testChainBase.blocks[1:])
   476  
   477  	// Wrap the importer to allow stepping
   478  	var blocked atomic.Uint32
   479  	proceed := make(chan struct{})
   480  	tester.downloader.chainInsertHook = func(results []*fetchResult) {
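        		// Record how many fetch results are being held back, then block until the
        		// test releases them via the proceed channel.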
   481  		blocked.Store(uint32(len(results)))
   482  		<-proceed
   483  	}
   484  	// Start a synchronisation concurrently
   485  	errc := make(chan error, 1)
   486  	go func() {
   487  		errc <- tester.sync("peer", nil, mode)
   488  	}()
   489  	// Iteratively take some blocks, always checking the retrieval count
   490  	for {
   491  		// Check the retrieval count synchronously (the reason for this ugly block)
   492  		tester.lock.RLock()
   493  		retrieved := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
   494  		tester.lock.RUnlock()
   495  		if retrieved >= targetBlocks+1 {
   496  			break
   497  		}
   498  		// Wait a bit for sync to throttle itself
   499  		var cached, frozen int
   500  		for start := time.Now(); time.Since(start) < 3*time.Second; {
   501  			time.Sleep(25 * time.Millisecond)
   502  
   503  			tester.lock.Lock()
   504  			tester.downloader.queue.lock.Lock()
   505  			tester.downloader.queue.resultCache.lock.Lock()
   506  			{
   507  				cached = tester.downloader.queue.resultCache.countCompleted()
   508  				frozen = int(blocked.Load())
   509  				retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
   510  			}
   511  			tester.downloader.queue.resultCache.lock.Unlock()
   512  			tester.downloader.queue.lock.Unlock()
   513  			tester.lock.Unlock()
   514  
   515  			if cached == blockCacheMaxItems ||
   516  				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
   517  				retrieved+cached+frozen == targetBlocks+1 ||
   518  				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
   519  				break
   520  			}
   521  		}
   522  		// Make sure we filled up the cache, then exhaust it
   523  		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
   524  		tester.lock.RLock()
   525  		retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
   526  		tester.lock.RUnlock()
   527  		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
   528  			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
   529  		}
   530  		// Permit the blocked blocks to import
   531  		if blocked.Load() > 0 {
   532  			blocked.Store(uint32(0))
   533  			proceed <- struct{}{}
   534  		}
   535  	}
   536  	// Check that we haven't pulled more blocks than available
   537  	assertOwnChain(t, tester, targetBlocks+1)
   538  	if err := <-errc; err != nil {
   539  		t.Fatalf("block synchronization failed: %v", err)
   540  	}
   541  }
   542  
   543  // Tests that simple synchronization against a forked chain works correctly. In
   544  // this test common ancestor lookup should *not* be short circuited, and a full
   545  // binary search should be executed.
   546  func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, zond.ETH66, FullSync) }
   547  func TestForkedSync66Snap(t *testing.T)  { testForkedSync(t, zond.ETH66, SnapSync) }
   548  func TestForkedSync66Light(t *testing.T) { testForkedSync(t, zond.ETH66, LightSync) }
   549  func TestForkedSync67Full(t *testing.T)  { testForkedSync(t, zond.ETH67, FullSync) }
   550  func TestForkedSync67Snap(t *testing.T)  { testForkedSync(t, zond.ETH67, SnapSync) }
   551  func TestForkedSync67Light(t *testing.T) { testForkedSync(t, zond.ETH67, LightSync) }
   552  
   553  func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   554  	tester := newTester(t)
   555  	defer tester.terminate()
   556  
   557  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
   558  	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
   559  	tester.newPeer("fork A", protocol, chainA.blocks[1:])
   560  	tester.newPeer("fork B", protocol, chainB.blocks[1:])
   561  	// Synchronise with the peer and make sure all blocks were retrieved
   562  	if err := tester.sync("fork A", nil, mode); err != nil {
   563  		t.Fatalf("failed to synchronise blocks: %v", err)
   564  	}
   565  	assertOwnChain(t, tester, len(chainA.blocks))
   566  
   567  	// Synchronise with the second peer and make sure that fork is pulled too
   568  	if err := tester.sync("fork B", nil, mode); err != nil {
   569  		t.Fatalf("failed to synchronise blocks: %v", err)
   570  	}
   571  	assertOwnChain(t, tester, len(chainB.blocks))
   572  }
   573  
   574  // Tests that synchronising against a much shorter but much heavier fork works
   575  // correctly and is not dropped.
   576  func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, zond.ETH66, FullSync) }
   577  func TestHeavyForkedSync66Snap(t *testing.T)  { testHeavyForkedSync(t, zond.ETH66, SnapSync) }
   578  func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, zond.ETH66, LightSync) }
   579  func TestHeavyForkedSync67Full(t *testing.T)  { testHeavyForkedSync(t, zond.ETH67, FullSync) }
   580  func TestHeavyForkedSync67Snap(t *testing.T)  { testHeavyForkedSync(t, zond.ETH67, SnapSync) }
   581  func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, zond.ETH67, LightSync) }
   582  
   583  func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   584  	tester := newTester(t)
   585  	defer tester.terminate()
   586  
   587  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
   588  	chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
   589  	tester.newPeer("light", protocol, chainA.blocks[1:])
   590  	tester.newPeer("heavy", protocol, chainB.blocks[1:])
   591  
   592  	// Synchronise with the peer and make sure all blocks were retrieved
   593  	if err := tester.sync("light", nil, mode); err != nil {
   594  		t.Fatalf("failed to synchronise blocks: %v", err)
   595  	}
   596  	assertOwnChain(t, tester, len(chainA.blocks))
   597  
   598  	// Synchronise with the second peer and make sure that fork is pulled too
   599  	if err := tester.sync("heavy", nil, mode); err != nil {
   600  		t.Fatalf("failed to synchronise blocks: %v", err)
   601  	}
   602  	assertOwnChain(t, tester, len(chainB.blocks))
   603  }
   604  
   605  // Tests that chain forks are contained within a certain interval of the current
   606  // chain head, ensuring that malicious peers cannot waste resources by feeding
   607  // long dead chains.
   608  func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, zond.ETH66, FullSync) }
   609  func TestBoundedForkedSync66Snap(t *testing.T)  { testBoundedForkedSync(t, zond.ETH66, SnapSync) }
   610  func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, zond.ETH66, LightSync) }
   611  func TestBoundedForkedSync67Full(t *testing.T)  { testBoundedForkedSync(t, zond.ETH67, FullSync) }
   612  func TestBoundedForkedSync67Snap(t *testing.T)  { testBoundedForkedSync(t, zond.ETH67, SnapSync) }
   613  func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, zond.ETH67, LightSync) }
   614  
   615  func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   616  	tester := newTester(t)
   617  	defer tester.terminate()
   618  
   619  	chainA := testChainForkLightA
   620  	chainB := testChainForkLightB
   621  	tester.newPeer("original", protocol, chainA.blocks[1:])
   622  	tester.newPeer("rewriter", protocol, chainB.blocks[1:])
   623  
   624  	// Synchronise with the peer and make sure all blocks were retrieved
   625  	if err := tester.sync("original", nil, mode); err != nil {
   626  		t.Fatalf("failed to synchronise blocks: %v", err)
   627  	}
   628  	assertOwnChain(t, tester, len(chainA.blocks))
   629  
   630  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   631  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   632  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   633  	}
   634  }
   635  
   636  // Tests that chain forks are contained within a certain interval of the current
   637  // chain head for short but heavy forks too. These are a bit special because they
   638  // take different ancestor lookup paths.
   639  func TestBoundedHeavyForkedSync66Full(t *testing.T) {
   640  	testBoundedHeavyForkedSync(t, zond.ETH66, FullSync)
   641  }
   642  func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
   643  	testBoundedHeavyForkedSync(t, zond.ETH66, SnapSync)
   644  }
   645  func TestBoundedHeavyForkedSync66Light(t *testing.T) {
   646  	testBoundedHeavyForkedSync(t, zond.ETH66, LightSync)
   647  }
   648  func TestBoundedHeavyForkedSync67Full(t *testing.T) {
   649  	testBoundedHeavyForkedSync(t, zond.ETH67, FullSync)
   650  }
   651  func TestBoundedHeavyForkedSync67Snap(t *testing.T) {
   652  	testBoundedHeavyForkedSync(t, zond.ETH67, SnapSync)
   653  }
   654  func TestBoundedHeavyForkedSync67Light(t *testing.T) {
   655  	testBoundedHeavyForkedSync(t, zond.ETH67, LightSync)
   656  }
   657  
   658  func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   659  	tester := newTester(t)
   660  	defer tester.terminate()
   661  
   662  	// Create a long enough forked chain
   663  	chainA := testChainForkLightA
   664  	chainB := testChainForkHeavy
   665  	tester.newPeer("original", protocol, chainA.blocks[1:])
   666  
   667  	// Synchronise with the peer and make sure all blocks were retrieved
   668  	if err := tester.sync("original", nil, mode); err != nil {
   669  		t.Fatalf("failed to synchronise blocks: %v", err)
   670  	}
   671  	assertOwnChain(t, tester, len(chainA.blocks))
   672  
   673  	tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
   674  	// Synchronise with the second peer and ensure that the fork is rejected for being too old
   675  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   676  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   677  	}
   678  }
   679  
   680  // Tests that a canceled download wipes all previously accumulated state.
   681  func TestCancel66Full(t *testing.T)  { testCancel(t, zond.ETH66, FullSync) }
   682  func TestCancel66Snap(t *testing.T)  { testCancel(t, zond.ETH66, SnapSync) }
   683  func TestCancel66Light(t *testing.T) { testCancel(t, zond.ETH66, LightSync) }
   684  func TestCancel67Full(t *testing.T)  { testCancel(t, zond.ETH67, FullSync) }
   685  func TestCancel67Snap(t *testing.T)  { testCancel(t, zond.ETH67, SnapSync) }
   686  func TestCancel67Light(t *testing.T) { testCancel(t, zond.ETH67, LightSync) }
   687  
   688  func testCancel(t *testing.T, protocol uint, mode SyncMode) {
   689  	tester := newTester(t)
   690  	defer tester.terminate()
   691  
   692  	chain := testChainBase.shorten(MaxHeaderFetch)
   693  	tester.newPeer("peer", protocol, chain.blocks[1:])
   694  
   695  	// Make sure canceling works with a pristine downloader
   696  	tester.downloader.Cancel()
   697  	if !tester.downloader.queue.Idle() {
   698  		t.Errorf("download queue not idle")
   699  	}
   700  	// Synchronise with the peer, but cancel afterwards
   701  	if err := tester.sync("peer", nil, mode); err != nil {
   702  		t.Fatalf("failed to synchronise blocks: %v", err)
   703  	}
   704  	tester.downloader.Cancel()
   705  	if !tester.downloader.queue.Idle() {
   706  		t.Errorf("download queue not idle")
   707  	}
   708  }
   709  
   710  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   711  func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, zond.ETH66, FullSync) }
   712  func TestMultiSynchronisation66Snap(t *testing.T)  { testMultiSynchronisation(t, zond.ETH66, SnapSync) }
   713  func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, zond.ETH66, LightSync) }
   714  func TestMultiSynchronisation67Full(t *testing.T)  { testMultiSynchronisation(t, zond.ETH67, FullSync) }
   715  func TestMultiSynchronisation67Snap(t *testing.T)  { testMultiSynchronisation(t, zond.ETH67, SnapSync) }
   716  func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, zond.ETH67, LightSync) }
   717  
   718  func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
   719  	tester := newTester(t)
   720  	defer tester.terminate()
   721  
   722  	// Create various peers with various parts of the chain
   723  	targetPeers := 8
   724  	chain := testChainBase.shorten(targetPeers * 100)
   725  
   726  	for i := 0; i < targetPeers; i++ {
   727  		id := fmt.Sprintf("peer #%d", i)
   728  		tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:])
   729  	}
   730  	if err := tester.sync("peer #0", nil, mode); err != nil {
   731  		t.Fatalf("failed to synchronise blocks: %v", err)
   732  	}
   733  	assertOwnChain(t, tester, len(chain.blocks))
   734  }
   735  
   736  // Tests that synchronisations behave well in multi-version protocol environments
   737  // and do not wreak havoc on other nodes in the network.
   738  func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, zond.ETH66, FullSync) }
   739  func TestMultiProtoSynchronisation66Snap(t *testing.T)  { testMultiProtoSync(t, zond.ETH66, SnapSync) }
   740  func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, zond.ETH66, LightSync) }
   741  func TestMultiProtoSynchronisation67Full(t *testing.T)  { testMultiProtoSync(t, zond.ETH67, FullSync) }
   742  func TestMultiProtoSynchronisation67Snap(t *testing.T)  { testMultiProtoSync(t, zond.ETH67, SnapSync) }
   743  func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, zond.ETH67, LightSync) }
   744  
   745  func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
   746  	tester := newTester(t)
   747  	defer tester.terminate()
   748  
   749  	// Create a small enough block chain to download
   750  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   751  
   752  	// Create peers of every type
   753  	tester.newPeer("peer 66", zond.ETH66, chain.blocks[1:])
   754  	tester.newPeer("peer 67", zond.ETH67, chain.blocks[1:])
   755  
   756  	// Synchronise with the requested peer and make sure all blocks were retrieved
   757  	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
   758  		t.Fatalf("failed to synchronise blocks: %v", err)
   759  	}
   760  	assertOwnChain(t, tester, len(chain.blocks))
   761  
   762  	// Check that no peers have been dropped off
   763  	for _, version := range []int{66, 67} {
   764  		peer := fmt.Sprintf("peer %d", version)
   765  		if _, ok := tester.peers[peer]; !ok {
   766  			t.Errorf("%s dropped", peer)
   767  		}
   768  	}
   769  }
   770  
   771  // Tests that if a block is empty (e.g. header only), no body request should be
   772  // made, and instead the header should be assembled into a whole block in itself.
   773  func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, zond.ETH66, FullSync) }
   774  func TestEmptyShortCircuit66Snap(t *testing.T)  { testEmptyShortCircuit(t, zond.ETH66, SnapSync) }
   775  func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, zond.ETH66, LightSync) }
   776  func TestEmptyShortCircuit67Full(t *testing.T)  { testEmptyShortCircuit(t, zond.ETH67, FullSync) }
   777  func TestEmptyShortCircuit67Snap(t *testing.T)  { testEmptyShortCircuit(t, zond.ETH67, SnapSync) }
   778  func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, zond.ETH67, LightSync) }
   779  
   780  func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
   781  	tester := newTester(t)
   782  	defer tester.terminate()
   783  
   784  	// Create a block chain to download
   785  	chain := testChainBase
   786  	tester.newPeer("peer", protocol, chain.blocks[1:])
   787  
   788  	// Instrument the downloader to signal body requests
   789  	var bodiesHave, receiptsHave atomic.Int32
   790  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   791  		bodiesHave.Add(int32(len(headers)))
   792  	}
   793  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   794  		receiptsHave.Add(int32(len(headers)))
   795  	}
   796  	// Synchronise with the peer and make sure all blocks were retrieved
   797  	if err := tester.sync("peer", nil, mode); err != nil {
   798  		t.Fatalf("failed to synchronise blocks: %v", err)
   799  	}
   800  	assertOwnChain(t, tester, len(chain.blocks))
   801  
   802  	// Validate the number of block bodies that should have been requested
   803  	bodiesNeeded, receiptsNeeded := 0, 0
   804  	for _, block := range chain.blocks[1:] {
   805  		if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   806  			bodiesNeeded++
   807  		}
   808  	}
   809  	for _, block := range chain.blocks[1:] {
   810  		if mode == SnapSync && len(block.Transactions()) > 0 {
   811  			receiptsNeeded++
   812  		}
   813  	}
   814  	if int(bodiesHave.Load()) != bodiesNeeded {
   815  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded)
   816  	}
   817  	if int(receiptsHave.Load()) != receiptsNeeded {
   818  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded)
   819  	}
   820  }
   821  
   822  // Tests that headers are enqueued continuously, preventing malicious nodes from
   823  // stalling the downloader by feeding gapped header chains.
   824  func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, zond.ETH66, FullSync) }
   825  func TestMissingHeaderAttack66Snap(t *testing.T)  { testMissingHeaderAttack(t, zond.ETH66, SnapSync) }
   826  func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, zond.ETH66, LightSync) }
   827  func TestMissingHeaderAttack67Full(t *testing.T)  { testMissingHeaderAttack(t, zond.ETH67, FullSync) }
   828  func TestMissingHeaderAttack67Snap(t *testing.T)  { testMissingHeaderAttack(t, zond.ETH67, SnapSync) }
   829  func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, zond.ETH67, LightSync) }
   830  
   831  func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   832  	tester := newTester(t)
   833  	defer tester.terminate()
   834  
   835  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   836  
   837  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
   838  	attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}
   839  
   840  	if err := tester.sync("attack", nil, mode); err == nil {
   841  		t.Fatalf("succeeded attacker synchronisation")
   842  	}
   843  	// Synchronise with the valid peer and make sure sync succeeds
   844  	tester.newPeer("valid", protocol, chain.blocks[1:])
   845  	if err := tester.sync("valid", nil, mode); err != nil {
   846  		t.Fatalf("failed to synchronise blocks: %v", err)
   847  	}
   848  	assertOwnChain(t, tester, len(chain.blocks))
   849  }
   850  
   851  // Tests that if requested headers are shifted (i.e. first is missing), the queue
   852  // detects the invalid numbering.
   853  func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, zond.ETH66, FullSync) }
   854  func TestShiftedHeaderAttack66Snap(t *testing.T)  { testShiftedHeaderAttack(t, zond.ETH66, SnapSync) }
   855  func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, zond.ETH66, LightSync) }
   856  func TestShiftedHeaderAttack67Full(t *testing.T)  { testShiftedHeaderAttack(t, zond.ETH67, FullSync) }
   857  func TestShiftedHeaderAttack67Snap(t *testing.T)  { testShiftedHeaderAttack(t, zond.ETH67, SnapSync) }
   858  func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, zond.ETH67, LightSync) }
   859  
   860  func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   861  	tester := newTester(t)
   862  	defer tester.terminate()
   863  
   864  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   865  
   866  	// Attempt a full sync with an attacker feeding shifted headers
   867  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
   868  	attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}
   869  
   870  	if err := tester.sync("attack", nil, mode); err == nil {
   871  		t.Fatalf("succeeded attacker synchronisation")
   872  	}
   873  	// Synchronise with the valid peer and make sure sync succeeds
   874  	tester.newPeer("valid", protocol, chain.blocks[1:])
   875  	if err := tester.sync("valid", nil, mode); err != nil {
   876  		t.Fatalf("failed to synchronise blocks: %v", err)
   877  	}
   878  	assertOwnChain(t, tester, len(chain.blocks))
   879  }
   880  
   881  // Tests that upon detecting an invalid header, the recent ones are rolled back
   882  // for various failure scenarios. Afterwards a full sync is attempted to make
   883  // sure no state was corrupted.
   884  func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, zond.ETH66, SnapSync) }
   885  func TestInvalidHeaderRollback67Snap(t *testing.T) { testInvalidHeaderRollback(t, zond.ETH67, SnapSync) }
   886  
   887  func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
   888  	tester := newTester(t)
   889  	defer tester.terminate()
   890  
   891  	// Create a small enough block chain to download
   892  	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
   893  	chain := testChainBase.shorten(targetBlocks)
   894  
   895  	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
   896  	// This should result in the last fsHeaderSafetyNet headers being rolled back.
   897  	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
   898  
   899  	fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
   900  	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
   901  
   902  	if err := tester.sync("fast-attack", nil, mode); err == nil {
   903  		t.Fatalf("succeeded fast attacker synchronisation")
   904  	}
   905  	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
   906  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
   907  	}
   908  	// Attempt to sync with an attacker that feeds junk during the block import phase.
   909  	// This should result in both the last fsHeaderSafetyNet number of headers being
   910  	// rolled back, and also the pivot point being reverted to a non-block status.
   911  	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
   912  
   913  	blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
   914  	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
   915  	blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
   916  
   917  	if err := tester.sync("block-attack", nil, mode); err == nil {
   918  		t.Fatalf("succeeded block attacker synchronisation")
   919  	}
   920  	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
   921  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
   922  	}
   923  	if mode == SnapSync {
   924  		if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
   925  			t.Errorf("fast sync pivot block #%d not rolled back", head)
   926  		}
   927  	}
   928  	// Attempt to sync with an attacker that withholds promised blocks after the
   929  	// fast sync pivot point. This could be an attempt to leave the node with a bad
   930  	// but already imported pivot block.
   931  	withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])
   932  
   933  	tester.downloader.syncInitHook = func(uint64, uint64) {
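        		// Once this sync starts, withhold every block from index `missing` onwards,
        		// then disarm the hook so later syncs with the valid peer are unaffected.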
   934  		for i := missing; i < len(chain.blocks); i++ {
   935  			withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
   936  		}
   937  		tester.downloader.syncInitHook = nil
   938  	}
   939  	if err := tester.sync("withhold-attack", nil, mode); err == nil {
   940  		t.Fatalf("succeeded withholding attacker synchronisation")
   941  	}
   942  	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
   943  		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
   944  	}
   945  	if mode == SnapSync {
   946  		if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
   947  			t.Errorf("fast sync pivot block #%d not rolled back", head)
   948  		}
   949  	}
   950  	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
   951  	// should also disable fast syncing for this process, verify that we did a fresh full
   952  	// sync. Note, we can't assert anything about the receipts since we won't purge the
   953  	// database of them, hence we can't use assertOwnChain.
   954  	tester.newPeer("valid", protocol, chain.blocks[1:])
   955  	if err := tester.sync("valid", nil, mode); err != nil {
   956  		t.Fatalf("failed to synchronise blocks: %v", err)
   957  	}
   958  	assertOwnChain(t, tester, len(chain.blocks))
   959  }
   960  
   961  // Tests that a peer advertising a high TD doesn't get to stall the downloader
   962  // afterwards by not sending any useful hashes.
   963  func TestHighTDStarvationAttack66Full(t *testing.T) {
   964  	testHighTDStarvationAttack(t, zond.ETH66, FullSync)
   965  }
   966  func TestHighTDStarvationAttack66Snap(t *testing.T) {
   967  	testHighTDStarvationAttack(t, zond.ETH66, SnapSync)
   968  }
   969  func TestHighTDStarvationAttack66Light(t *testing.T) {
   970  	testHighTDStarvationAttack(t, zond.ETH66, LightSync)
   971  }
   972  func TestHighTDStarvationAttack67Full(t *testing.T) {
   973  	testHighTDStarvationAttack(t, zond.ETH67, FullSync)
   974  }
   975  func TestHighTDStarvationAttack67Snap(t *testing.T) {
   976  	testHighTDStarvationAttack(t, zond.ETH67, SnapSync)
   977  }
   978  func TestHighTDStarvationAttack67Light(t *testing.T) {
   979  	testHighTDStarvationAttack(t, zond.ETH67, LightSync)
   980  }
   981  
   982  func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
   983  	tester := newTester(t)
   984  	defer tester.terminate()
   985  
   986  	chain := testChainBase.shorten(1)
   987  	tester.newPeer("attack", protocol, chain.blocks[1:])
   988  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
   989  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
   990  	}
   991  }
   992  
   993  // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
   994  func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, zond.ETH66) }
   995  func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, zond.ETH67) }
   996  
   997  func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
   998  	// Define the disconnection requirement for individual hash fetch errors
   999  	tests := []struct {
  1000  		result error
  1001  		drop   bool
  1002  	}{
  1003  		{nil, false},                        // Sync succeeded, all is well
  1004  		{errBusy, false},                    // Sync is already in progress, no problem
  1005  		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
  1006  		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
  1007  		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
  1008  		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
  1009  		{errNoPeers, false},                 // No peers to download from, soft race, no issue
  1010  		{errTimeout, true},                  // No hashes received in due time, drop the peer
  1011  		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
  1012  		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
  1013  		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
  1014  		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
  1015  		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
  1016  		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
  1017  		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  1018  	}
  1019  	// Run the tests and check disconnection status
  1020  	tester := newTester(t)
  1021  	defer tester.terminate()
  1022  	chain := testChainBase.shorten(1)
  1023  
  1024  	for i, tt := range tests {
  1025  		// Register a new peer and ensure its presence
  1026  		id := fmt.Sprintf("test %d", i)
  1027  		tester.newPeer(id, protocol, chain.blocks[1:])
  1028  		if _, ok := tester.peers[id]; !ok {
  1029  			t.Fatalf("test %d: registered peer not found", i)
  1030  		}
  1031  		// Simulate a synchronisation and check the required result
  1032  		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  1033  
  1034  		tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
  1035  		if _, ok := tester.peers[id]; !ok != tt.drop {
  1036  			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  1037  		}
  1038  	}
  1039  }
  1040  
  1041  // Tests that synchronisation progress (origin block number, current block number
  1042  // and highest block number) is tracked and updated correctly.
  1043  func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, zond.ETH66, FullSync) }
  1044  func TestSyncProgress66Snap(t *testing.T)  { testSyncProgress(t, zond.ETH66, SnapSync) }
  1045  func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, zond.ETH66, LightSync) }
  1046  func TestSyncProgress67Full(t *testing.T)  { testSyncProgress(t, zond.ETH67, FullSync) }
  1047  func TestSyncProgress67Snap(t *testing.T)  { testSyncProgress(t, zond.ETH67, SnapSync) }
  1048  func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, zond.ETH67, LightSync) }
  1049  
  1050  func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1051  	tester := newTester(t)
  1052  	defer tester.terminate()
  1053  
  1054  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1055  
  1056  	// Set a sync init hook to catch progress changes
  1057  	starting := make(chan struct{})
  1058  	progress := make(chan struct{})
  1059  
  1060  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1061  		starting <- struct{}{}
  1062  		<-progress
  1063  	}
  1064  	checkProgress(t, tester.downloader, "pristine", zond.SyncProgress{})
  1065  
  1066  	// Synchronise half the blocks and check initial progress
  1067  	tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:])
  1068  	pending := new(sync.WaitGroup)
  1069  	pending.Add(1)
  1070  
  1071  	go func() {
  1072  		defer pending.Done()
  1073  		if err := tester.sync("peer-half", nil, mode); err != nil {
  1074  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1075  		}
  1076  	}()
  1077  	<-starting
  1078  	checkProgress(t, tester.downloader, "initial", zond.SyncProgress{
  1079  		HighestBlock: uint64(len(chain.blocks)/2 - 1),
  1080  	})
  1081  	progress <- struct{}{}
  1082  	pending.Wait()
  1083  
  1084  	// Synchronise all the blocks and check continuation progress
  1085  	tester.newPeer("peer-full", protocol, chain.blocks[1:])
  1086  	pending.Add(1)
  1087  	go func() {
  1088  		defer pending.Done()
  1089  		if err := tester.sync("peer-full", nil, mode); err != nil {
  1090  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1091  		}
  1092  	}()
  1093  	<-starting
  1094  	checkProgress(t, tester.downloader, "completing", zond.SyncProgress{
  1095  		StartingBlock: uint64(len(chain.blocks)/2 - 1),
  1096  		CurrentBlock:  uint64(len(chain.blocks)/2 - 1),
  1097  		HighestBlock:  uint64(len(chain.blocks) - 1),
  1098  	})
  1099  
  1100  	// Check final progress after successful sync
  1101  	progress <- struct{}{}
  1102  	pending.Wait()
  1103  	checkProgress(t, tester.downloader, "final", zond.SyncProgress{
  1104  		StartingBlock: uint64(len(chain.blocks)/2 - 1),
  1105  		CurrentBlock:  uint64(len(chain.blocks) - 1),
  1106  		HighestBlock:  uint64(len(chain.blocks) - 1),
  1107  	})
  1108  }
  1109  
  1110  func checkProgress(t *testing.T, d *Downloader, stage string, want zond.SyncProgress) {
  1111  	// Mark this method as a helper to report errors at callsite, not in here
  1112  	t.Helper()
  1113  
  1114  	p := d.Progress()
  1115  	if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
  1116  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1117  	}
  1118  }
  1119  
  1120  // Tests that synchronisation progress (origin block number and highest block
  1121  // number) is tracked and updated correctly in case of a fork (or manual head
  1122  // reversal).
  1123  func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, zond.ETH66, FullSync) }
  1124  func TestForkedSyncProgress66Snap(t *testing.T)  { testForkedSyncProgress(t, zond.ETH66, SnapSync) }
  1125  func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, zond.ETH66, LightSync) }
  1126  func TestForkedSyncProgress67Full(t *testing.T)  { testForkedSyncProgress(t, zond.ETH67, FullSync) }
  1127  func TestForkedSyncProgress67Snap(t *testing.T)  { testForkedSyncProgress(t, zond.ETH67, SnapSync) }
  1128  func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, zond.ETH67, LightSync) }
  1129  
  1130  func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1131  	tester := newTester(t)
  1132  	defer tester.terminate()
  1133  
  1134  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
  1135  	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
  1136  
  1137  	// Set a sync init hook to catch progress changes
  1138  	starting := make(chan struct{})
  1139  	progress := make(chan struct{})
  1140  
  1141  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1142  		starting <- struct{}{}
  1143  		<-progress
  1144  	}
  1145  	checkProgress(t, tester.downloader, "pristine", zond.SyncProgress{})
  1146  
  1147  	// Synchronise with one of the forks and check progress
  1148  	tester.newPeer("fork A", protocol, chainA.blocks[1:])
  1149  	pending := new(sync.WaitGroup)
  1150  	pending.Add(1)
  1151  	go func() {
  1152  		defer pending.Done()
  1153  		if err := tester.sync("fork A", nil, mode); err != nil {
  1154  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1155  		}
  1156  	}()
  1157  	<-starting
  1158  
  1159  	checkProgress(t, tester.downloader, "initial", zond.SyncProgress{
  1160  		HighestBlock: uint64(len(chainA.blocks) - 1),
  1161  	})
  1162  	progress <- struct{}{}
  1163  	pending.Wait()
  1164  
  1165  	// Simulate a successful sync above the fork
  1166  	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
  1167  
  1168  	// Synchronise with the second fork and check progress resets
  1169  	tester.newPeer("fork B", protocol, chainB.blocks[1:])
  1170  	pending.Add(1)
  1171  	go func() {
  1172  		defer pending.Done()
  1173  		if err := tester.sync("fork B", nil, mode); err != nil {
  1174  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1175  		}
  1176  	}()
  1177  	<-starting
  1178  	checkProgress(t, tester.downloader, "forking", zond.SyncProgress{
  1179  		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
  1180  		CurrentBlock:  uint64(len(chainA.blocks) - 1),
  1181  		HighestBlock:  uint64(len(chainB.blocks) - 1),
  1182  	})
  1183  
  1184  	// Check final progress after successful sync
  1185  	progress <- struct{}{}
  1186  	pending.Wait()
  1187  	checkProgress(t, tester.downloader, "final", zond.SyncProgress{
  1188  		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
  1189  		CurrentBlock:  uint64(len(chainB.blocks) - 1),
  1190  		HighestBlock:  uint64(len(chainB.blocks) - 1),
  1191  	})
  1192  }
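
// Note: the progress tests above all share the same two-channel handshake around
// the downloader's syncInitHook to pause a sync while the reported progress is
// inspected. The helper below is a hypothetical, stand-alone sketch of that
// pattern; it is not used by the test suite and its names are illustrative only.
func exampleProgressHandshake(inspect func()) {
	starting := make(chan struct{})
	progress := make(chan struct{})

	// This goroutine stands in for the one running tester.sync: it reaches the
	// downloader's syncInitHook once the sync boundaries are known.
	go func() {
		starting <- struct{}{} // announce that sync initialisation has happened
		<-progress             // block until the caller has inspected the progress
		// ... the sync would continue to completion here
	}()

	<-starting             // wait for the hook to fire
	inspect()              // the tests call checkProgress at this point
	progress <- struct{}{} // release the paused sync
}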
  1193  
  1194  // Tests that if synchronisation is aborted due to some failure, then the progress
  1195  // origin is not updated in the next sync cycle, as it should be considered the
  1196  // continuation of the previous sync and not a new instance.
  1197  func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, zond.ETH66, FullSync) }
  1198  func TestFailedSyncProgress66Snap(t *testing.T)  { testFailedSyncProgress(t, zond.ETH66, SnapSync) }
  1199  func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, zond.ETH66, LightSync) }
  1200  func TestFailedSyncProgress67Full(t *testing.T)  { testFailedSyncProgress(t, zond.ETH67, FullSync) }
  1201  func TestFailedSyncProgress67Snap(t *testing.T)  { testFailedSyncProgress(t, zond.ETH67, SnapSync) }
  1202  func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, zond.ETH67, LightSync) }
  1203  
  1204  func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1205  	tester := newTester(t)
  1206  	defer tester.terminate()
  1207  
  1208  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1209  
  1210  	// Set a sync init hook to catch progress changes
  1211  	starting := make(chan struct{})
  1212  	progress := make(chan struct{})
  1213  
  1214  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1215  		starting <- struct{}{}
  1216  		<-progress
  1217  	}
  1218  	checkProgress(t, tester.downloader, "pristine", zond.SyncProgress{})
  1219  
  1220  	// Attempt a full sync with a faulty peer
  1221  	missing := len(chain.blocks)/2 - 1
  1222  
  1223  	faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
  1224  	faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
  1225  
  1226  	pending := new(sync.WaitGroup)
  1227  	pending.Add(1)
  1228  	go func() {
  1229  		defer pending.Done()
  1230  		if err := tester.sync("faulty", nil, mode); err == nil {
  1231  			panic("succeeded faulty synchronisation")
  1232  		}
  1233  	}()
  1234  	<-starting
  1235  	checkProgress(t, tester.downloader, "initial", zond.SyncProgress{
  1236  		HighestBlock: uint64(len(chain.blocks) - 1),
  1237  	})
  1238  	progress <- struct{}{}
  1239  	pending.Wait()
  1240  	afterFailedSync := tester.downloader.Progress()
  1241  
  1242  	// Synchronise with a good peer and check that the progress origin remains
  1243  	// the same after a failure
  1244  	tester.newPeer("valid", protocol, chain.blocks[1:])
  1245  	pending.Add(1)
  1246  	go func() {
  1247  		defer pending.Done()
  1248  		if err := tester.sync("valid", nil, mode); err != nil {
  1249  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1250  		}
  1251  	}()
  1252  	<-starting
  1253  	checkProgress(t, tester.downloader, "completing", afterFailedSync)
  1254  
  1255  	// Check final progress after successful sync
  1256  	progress <- struct{}{}
  1257  	pending.Wait()
  1258  	checkProgress(t, tester.downloader, "final", zond.SyncProgress{
  1259  		CurrentBlock: uint64(len(chain.blocks) - 1),
  1260  		HighestBlock: uint64(len(chain.blocks) - 1),
  1261  	})
  1262  }
  1263  
  1264  // Tests that if an attacker fakes a chain height, after the attack is detected,
  1265  // the progress height is successfully reduced at the next sync invocation.
  1266  func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, zond.ETH66, FullSync) }
  1267  func TestFakedSyncProgress66Snap(t *testing.T)  { testFakedSyncProgress(t, zond.ETH66, SnapSync) }
  1268  func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, zond.ETH66, LightSync) }
  1269  func TestFakedSyncProgress67Full(t *testing.T)  { testFakedSyncProgress(t, zond.ETH67, FullSync) }
  1270  func TestFakedSyncProgress67Snap(t *testing.T)  { testFakedSyncProgress(t, zond.ETH67, SnapSync) }
  1271  func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, zond.ETH67, LightSync) }
  1272  
  1273  func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  1274  	tester := newTester(t)
  1275  	defer tester.terminate()
  1276  
  1277  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1278  
  1279  	// Set a sync init hook to catch progress changes
  1280  	starting := make(chan struct{})
  1281  	progress := make(chan struct{})
  1282  	tester.downloader.syncInitHook = func(origin, latest uint64) {
  1283  		starting <- struct{}{}
  1284  		<-progress
  1285  	}
  1286  	checkProgress(t, tester.downloader, "pristine", zond.SyncProgress{})
  1287  
  1288  	// Create and sync with an attacker that promises a higher chain than available.
  1289  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
  1290  	numMissing := 5
  1291  	for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
  1292  		attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
  1293  	}
  1294  	pending := new(sync.WaitGroup)
  1295  	pending.Add(1)
  1296  	go func() {
  1297  		defer pending.Done()
  1298  		if err := tester.sync("attack", nil, mode); err == nil {
  1299  			panic("succeeded attacker synchronisation")
  1300  		}
  1301  	}()
  1302  	<-starting
  1303  	checkProgress(t, tester.downloader, "initial", zond.SyncProgress{
  1304  		HighestBlock: uint64(len(chain.blocks) - 1),
  1305  	})
  1306  	progress <- struct{}{}
  1307  	pending.Wait()
  1308  	afterFailedSync := tester.downloader.Progress()
  1309  
  1310  	// Synchronise with a good peer and check that the progress height has been reduced to
  1311  	// the true value.
  1312  	validChain := chain.shorten(len(chain.blocks) - numMissing)
  1313  	tester.newPeer("valid", protocol, validChain.blocks[1:])
  1314  	pending.Add(1)
  1315  
  1316  	go func() {
  1317  		defer pending.Done()
  1318  		if err := tester.sync("valid", nil, mode); err != nil {
  1319  			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1320  		}
  1321  	}()
  1322  	<-starting
  1323  	checkProgress(t, tester.downloader, "completing", zond.SyncProgress{
  1324  		CurrentBlock: afterFailedSync.CurrentBlock,
  1325  		HighestBlock: uint64(len(validChain.blocks) - 1),
  1326  	})
  1327  	// Check final progress after successful sync.
  1328  	progress <- struct{}{}
  1329  	pending.Wait()
  1330  	checkProgress(t, tester.downloader, "final", zond.SyncProgress{
  1331  		CurrentBlock: uint64(len(validChain.blocks) - 1),
  1332  		HighestBlock: uint64(len(validChain.blocks) - 1),
  1333  	})
  1334  }
  1335  
  1336  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1337  	testCases := []struct {
  1338  		remoteHeight uint64
  1339  		localHeight  uint64
  1340  		expected     []int
  1341  	}{
  1342  		// Remote is way higher. We should ask for the remote head and go backwards
  1343  		{1500, 1000,
  1344  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1345  		},
  1346  		{15000, 13006,
  1347  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1348  		},
  1349  		// Remote is pretty close to us. We don't have to fetch as many headers
  1350  		{1200, 1150,
  1351  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1352  		},
  1353  		// Remote is equal to us (so on a fork with higher td)
  1354  		// We should get the closest couple of ancestors
  1355  		{1500, 1500,
  1356  			[]int{1497, 1499},
  1357  		},
  1358  		// We're higher than the remote! Odd
  1359  		{1000, 1500,
  1360  			[]int{997, 999},
  1361  		},
  1362  		// Check some weird edge cases to make sure it behaves somewhat rationally
  1363  		{0, 1500,
  1364  			[]int{0, 2},
  1365  		},
  1366  		{6000000, 0,
  1367  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1368  		},
  1369  		{0, 0,
  1370  			[]int{0, 2},
  1371  		},
  1372  	}
  1373  	reqs := func(from, count, span int) []int {
  1374  		var r []int
  1375  		num := from
  1376  		for len(r) < count {
  1377  			r = append(r, num)
  1378  			num += span + 1
  1379  		}
  1380  		return r
  1381  	}
  1382  	for i, tt := range testCases {
  1383  		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1384  		data := reqs(int(from), count, span)
  1385  
  1386  		if max != uint64(data[len(data)-1]) {
  1387  			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1388  		}
  1389  		failed := false
  1390  		if len(data) != len(tt.expected) {
  1391  			failed = true
  1392  			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1393  		} else {
  1394  			for j, n := range data {
  1395  				if n != tt.expected[j] {
  1396  					failed = true
  1397  					break
  1398  				}
  1399  			}
  1400  		}
  1401  		if failed {
  1402  			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
  1403  			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
  1404  			t.Logf("got: %v\n", res)
  1405  			t.Logf("exp: %v\n", exp)
  1406  			t.Errorf("test %d: wrong values", i)
  1407  		}
  1408  	}
  1409  }
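
// For reference, a sketch of the span calculation that the table above exercises.
// This is an illustrative reimplementation (named sketchRequestSpan so it does not
// collide with the package's real calculateRequestSpan) intended to reproduce the
// expected values: request at most MaxHeaderFetch/16 headers, spaced out so the
// request covers the gap between the local and remote heads, with the spacing
// clamped to [2, 16] blocks.
func sketchRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
	maxCount := MaxHeaderFetch / 16

	// Anchor the request one block below the remote head and (ideally) reach down
	// to just below our own head.
	requestHead := int(remoteHeight) - 1
	if requestHead < 0 {
		requestHead = 0
	}
	requestBottom := int(localHeight) - 1
	if requestBottom < 0 {
		requestBottom = 0
	}
	totalSpan := requestHead - requestBottom

	// Space the requested headers out over the gap, clamped to [2, 16] blocks apart.
	span := 1 + totalSpan/maxCount
	if span < 2 {
		span = 2
	}
	if span > 16 {
		span = 16
	}
	// Request enough headers to cover the gap, capped at maxCount and at least 2.
	count := 1 + totalSpan/span
	if count > maxCount {
		count = maxCount
	}
	if count < 2 {
		count = 2
	}
	from := requestHead - (count-1)*span
	if from < 0 {
		from = 0
	}
	max := from + (count-1)*span
	return int64(from), count, span - 1, uint64(max)
}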
  1410  
  1411  // Tests that beacon-mode synchronisation (sync driven by a trusted head header)
  1412  // completes correctly against a peer for various local chain lengths.
  1413  func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, zond.ETH66, FullSync) }
  1414  func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, zond.ETH66, SnapSync) }
  1415  
  1416  func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
  1417  	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
  1418  
  1419  	var cases = []struct {
  1420  		name  string // The name of the testing scenario
  1421  		local int    // The length of the local chain (canonical chain assumed); 0 means genesis is the head
  1422  	}{
  1423  		{name: "Beacon sync since genesis", local: 0},
  1424  		{name: "Beacon sync with short local chain", local: 1},
  1425  		{name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
  1426  		{name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
  1427  	}
  1428  	for _, c := range cases {
  1429  		t.Run(c.name, func(t *testing.T) {
  1430  			success := make(chan struct{})
  1431  			tester := newTesterWithNotification(t, func() {
  1432  				close(success)
  1433  			})
  1434  			defer tester.terminate()
  1435  
  1436  			chain := testChainBase.shorten(blockCacheMaxItems - 15)
  1437  			tester.newPeer("peer", protocol, chain.blocks[1:])
  1438  
  1439  			// Build the local chain segment if it's required
  1440  			if c.local > 0 {
  1441  				tester.chain.InsertChain(chain.blocks[1 : c.local+1])
  1442  			}
  1443  			if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
  1444  				t.Fatalf("Failed to beacon sync chain %v: %v", c.name, err)
  1445  			}
  1446  			select {
  1447  			case <-success:
  1448  				// Ok, downloader fully cancelled after sync cycle
  1449  				if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != len(chain.blocks) {
  1450  					t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
  1451  				}
  1452  			case <-time.NewTimer(time.Second * 3).C:
  1453  				t.Fatalf("Failed to sync chain in three seconds")
  1454  			}
  1455  		})
  1456  	}
  1457  }
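
// A hypothetical, stand-alone variant of a single scenario from the table above,
// kept here purely as a usage sketch of the entry points exercised by
// testBeaconSync; it is not wired into the test suite.
func exampleBeaconSyncOnce(t *testing.T) {
	success := make(chan struct{})
	tester := newTesterWithNotification(t, func() { close(success) })
	defer tester.terminate()

	// Serve the full test chain from a single peer and sync towards its head.
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", zond.ETH67, chain.blocks[1:])

	if err := tester.downloader.BeaconSync(SnapSync, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
		t.Fatalf("failed to start beacon sync: %v", err)
	}
	select {
	case <-success:
		// Sync completed and the downloader invoked the success callback.
	case <-time.After(3 * time.Second):
		t.Fatal("beacon sync did not complete in time")
	}
}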