github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/eth/downloader/downloader_test.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"io/ioutil"
    23  	"math/big"
    24  	"os"
    25  	"strings"
    26  	"sync"
    27  	"sync/atomic"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum"
    32  	"github.com/ethereum/go-ethereum/common"
    33  	"github.com/ethereum/go-ethereum/consensus/ethash"
    34  	"github.com/ethereum/go-ethereum/core"
    35  	"github.com/ethereum/go-ethereum/core/rawdb"
    36  	"github.com/ethereum/go-ethereum/core/types"
    37  	"github.com/ethereum/go-ethereum/core/vm"
    38  	"github.com/ethereum/go-ethereum/eth/downloader/whitelist"
    39  	"github.com/ethereum/go-ethereum/eth/protocols/eth"
    40  	"github.com/ethereum/go-ethereum/eth/protocols/snap"
    41  	"github.com/ethereum/go-ethereum/event"
    42  	"github.com/ethereum/go-ethereum/log"
    43  	"github.com/ethereum/go-ethereum/params"
    44  	"github.com/ethereum/go-ethereum/rlp"
    45  	"github.com/ethereum/go-ethereum/trie"
    46  
    47  	"github.com/stretchr/testify/assert"
    48  )
    49  
// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	freezer    string           // Path of the temporary ancient store, removed in terminate
	chain      *core.BlockChain // Local chain the downloader imports into
	downloader *Downloader      // Downloader instance under test

	peers map[string]*downloadTesterPeer // Registered mock remote peers, keyed by id
	lock  sync.RWMutex                   // Guards the peers map against concurrent access
}
    59  
    60  // newTester creates a new downloader test mocker.
    61  func newTester() *downloadTester {
    62  	freezer, err := ioutil.TempDir("", "")
    63  	if err != nil {
    64  		panic(err)
    65  	}
    66  
    67  	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
    68  	if err != nil {
    69  		panic(err)
    70  	}
    71  
    72  	core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000))
    73  
    74  	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil, nil)
    75  	if err != nil {
    76  		panic(err)
    77  	}
    78  
    79  	tester := &downloadTester{
    80  		freezer: freezer,
    81  		chain:   chain,
    82  		peers:   make(map[string]*downloadTesterPeer),
    83  	}
    84  
    85  	//nolint: staticcheck
    86  	tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, nil, whitelist.NewService(10))
    87  
    88  	return tester
    89  }
    90  
// setWhitelist swaps out the downloader's chain validator, letting tests
// inject a mock whitelist implementation.
func (dl *downloadTester) setWhitelist(w ethereum.ChainValidator) {
	dl.downloader.ChainValidator = w
}
    94  
// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
	dl.chain.Stop()

	// Remove the on-disk ancient store created by newTester
	os.RemoveAll(dl.freezer)
}
   103  
// sync starts synchronizing with a remote peer, blocking until it completes.
// If td is nil, the peer's own head total difficulty is used as the target.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	head := dl.peers[id].chain.CurrentBlock()
	if td == nil {
		// If no particular TD was requested, load from the peer's blockchain
		td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64())
	}
	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
	// A closed cancelCh is the signal that the cycle fully wound down
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}
   122  
   123  // newPeer registers a new block download source into the downloader.
   124  func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
   125  	dl.lock.Lock()
   126  	defer dl.lock.Unlock()
   127  
   128  	peer := &downloadTesterPeer{
   129  		dl:              dl,
   130  		id:              id,
   131  		chain:           newTestBlockchain(blocks),
   132  		withholdHeaders: make(map[common.Hash]struct{}),
   133  	}
   134  	dl.peers[id] = peer
   135  
   136  	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
   137  		panic(err)
   138  	}
   139  	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
   140  		panic(err)
   141  	}
   142  	return peer
   143  }
   144  
// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	// NOTE(review): unregister errors are deliberately ignored — this is
	// best-effort cleanup in the mock
	dl.downloader.SnapSyncer.Unregister(id)
	dl.downloader.UnregisterPeer(id)
}
   154  
// downloadTesterPeer is a mock remote peer backed by its own independent
// blockchain, from which it answers downloader requests.
type downloadTesterPeer struct {
	dl    *downloadTester  // Tester owning this peer, used to deliver snap responses
	id    string           // Unique identifier of the peer
	chain *core.BlockChain // Independent chain this peer serves data from

	// Headers to refuse to serve (keyed by hash), simulating a malicious peer
	withholdHeaders map[common.Hash]struct{}
}
   162  
   163  // Head constructs a function to retrieve a peer's current head hash
   164  // and total difficulty.
   165  func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
   166  	head := dlp.chain.CurrentBlock()
   167  	return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64())
   168  }
   169  
   170  func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
   171  	headers := make([]*types.Header, len(rlpdata))
   172  	for i, data := range rlpdata {
   173  		var h types.Header
   174  		if err := rlp.DecodeBytes(data, &h); err != nil {
   175  			panic(err)
   176  		}
   177  		headers[i] = &h
   178  	}
   179  	return headers
   180  }
   181  
   182  // RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
   183  // origin; associated with a particular peer in the download tester. The returned
   184  // function can be used to retrieve batches of headers from the particular peer.
   185  func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
   186  	// Service the header query via the live handler code
   187  	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
   188  		Origin: eth.HashOrNumber{
   189  			Hash: origin,
   190  		},
   191  		Amount:  uint64(amount),
   192  		Skip:    uint64(skip),
   193  		Reverse: reverse,
   194  	}, nil)
   195  	headers := unmarshalRlpHeaders(rlpHeaders)
   196  	// If a malicious peer is simulated withholding headers, delete them
   197  	for hash := range dlp.withholdHeaders {
   198  		for i, header := range headers {
   199  			if header.Hash() == hash {
   200  				headers = append(headers[:i], headers[i+1:]...)
   201  				break
   202  			}
   203  		}
   204  	}
   205  	hashes := make([]common.Hash, len(headers))
   206  	for i, header := range headers {
   207  		hashes[i] = header.Hash()
   208  	}
   209  	// Deliver the headers to the downloader
   210  	req := &eth.Request{
   211  		Peer: dlp.id,
   212  	}
   213  	res := &eth.Response{
   214  		Req:  req,
   215  		Res:  (*eth.BlockHeadersPacket)(&headers),
   216  		Meta: hashes,
   217  		Time: 1,
   218  		Done: make(chan error, 1), // Ignore the returned status
   219  	}
   220  	go func() {
   221  		sink <- res
   222  	}()
   223  	return req, nil
   224  }
   225  
   226  // RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
   227  // origin; associated with a particular peer in the download tester. The returned
   228  // function can be used to retrieve batches of headers from the particular peer.
   229  func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
   230  	// Service the header query via the live handler code
   231  	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
   232  		Origin: eth.HashOrNumber{
   233  			Number: origin,
   234  		},
   235  		Amount:  uint64(amount),
   236  		Skip:    uint64(skip),
   237  		Reverse: reverse,
   238  	}, nil)
   239  	headers := unmarshalRlpHeaders(rlpHeaders)
   240  	// If a malicious peer is simulated withholding headers, delete them
   241  	for hash := range dlp.withholdHeaders {
   242  		for i, header := range headers {
   243  			if header.Hash() == hash {
   244  				headers = append(headers[:i], headers[i+1:]...)
   245  				break
   246  			}
   247  		}
   248  	}
   249  	hashes := make([]common.Hash, len(headers))
   250  	for i, header := range headers {
   251  		hashes[i] = header.Hash()
   252  	}
   253  	// Deliver the headers to the downloader
   254  	req := &eth.Request{
   255  		Peer: dlp.id,
   256  	}
   257  	res := &eth.Response{
   258  		Req:  req,
   259  		Res:  (*eth.BlockHeadersPacket)(&headers),
   260  		Meta: hashes,
   261  		Time: 1,
   262  		Done: make(chan error, 1), // Ignore the returned status
   263  	}
   264  	go func() {
   265  		sink <- res
   266  	}()
   267  	return req, nil
   268  }
   269  
   270  // RequestBodies constructs a getBlockBodies method associated with a particular
   271  // peer in the download tester. The returned function can be used to retrieve
   272  // batches of block bodies from the particularly requested peer.
   273  func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
   274  	blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)
   275  
   276  	bodies := make([]*eth.BlockBody, len(blobs))
   277  	for i, blob := range blobs {
   278  		bodies[i] = new(eth.BlockBody)
   279  		rlp.DecodeBytes(blob, bodies[i])
   280  	}
   281  	var (
   282  		txsHashes   = make([]common.Hash, len(bodies))
   283  		uncleHashes = make([]common.Hash, len(bodies))
   284  	)
   285  	hasher := trie.NewStackTrie(nil)
   286  	for i, body := range bodies {
   287  		txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
   288  		uncleHashes[i] = types.CalcUncleHash(body.Uncles)
   289  	}
   290  	req := &eth.Request{
   291  		Peer: dlp.id,
   292  	}
   293  	res := &eth.Response{
   294  		Req:  req,
   295  		Res:  (*eth.BlockBodiesPacket)(&bodies),
   296  		Meta: [][]common.Hash{txsHashes, uncleHashes},
   297  		Time: 1,
   298  		Done: make(chan error, 1), // Ignore the returned status
   299  	}
   300  	go func() {
   301  		sink <- res
   302  	}()
   303  	return req, nil
   304  }
   305  
   306  // RequestReceipts constructs a getReceipts method associated with a particular
   307  // peer in the download tester. The returned function can be used to retrieve
   308  // batches of block receipts from the particularly requested peer.
   309  func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
   310  	blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)
   311  
   312  	receipts := make([][]*types.Receipt, len(blobs))
   313  	for i, blob := range blobs {
   314  		rlp.DecodeBytes(blob, &receipts[i])
   315  	}
   316  	hasher := trie.NewStackTrie(nil)
   317  	hashes = make([]common.Hash, len(receipts))
   318  	for i, receipt := range receipts {
   319  		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
   320  	}
   321  	req := &eth.Request{
   322  		Peer: dlp.id,
   323  	}
   324  	res := &eth.Response{
   325  		Req:  req,
   326  		Res:  (*eth.ReceiptsPacket)(&receipts),
   327  		Meta: hashes,
   328  		Time: 1,
   329  		Done: make(chan error, 1), // Ignore the returned status
   330  	}
   331  	go func() {
   332  		sink <- res
   333  	}()
   334  	return req, nil
   335  }
   336  
// ID retrieves the peer's unique identifier, satisfying the snap syncer's
// peer interface.
func (dlp *downloadTesterPeer) ID() string {
	return dlp.id
}
   341  
// RequestAccountRange fetches a batch of accounts rooted in a specific account
// trie, starting with the origin.
func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetAccountRangePacket{
		ID:     id,
		Root:   root,
		Origin: origin,
		Limit:  limit,
		Bytes:  bytes,
	}
	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)

	// We need to convert to non-slim format, delegate to the packet code
	res := &snap.AccountRangePacket{
		ID:       id,
		Accounts: slimaccs,
		Proof:    proofs,
	}
	// NOTE(review): the unpack error is deliberately ignored in this mock
	hashes, accounts, _ := res.Unpack()

	// Deliver asynchronously, mimicking a network round trip
	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
	return nil
}
   366  
// RequestStorageRanges fetches a batch of storage slots belonging to one or
// more accounts. If slots from only one account is requested, an origin marker
// may also be used to retrieve from there.
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetStorageRangesPacket{
		ID:       id,
		Accounts: accounts,
		Root:     root,
		Origin:   origin,
		Limit:    limit,
		Bytes:    bytes,
	}
	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)

	// We need to convert to demultiplex, delegate to the packet code
	res := &snap.StorageRangesPacket{
		ID:    id,
		Slots: storage,
		Proof: proofs,
	}
	hashes, slots := res.Unpack()

	// Deliver asynchronously, mimicking a network round trip
	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
	return nil
}
   393  
// RequestByteCodes fetches a batch of bytecodes by hash.
func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	// Create the request and service it via the live handler code
	req := &snap.GetByteCodesPacket{
		ID:     id,
		Hashes: hashes,
		Bytes:  bytes,
	}
	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
	// Deliver asynchronously, mimicking a network round trip
	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
	return nil
}
   405  
// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
// a specific state trie.
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
	// Create the request and service it via the live handler code
	req := &snap.GetTrieNodesPacket{
		ID:    id,
		Root:  root,
		Paths: paths,
		Bytes: bytes,
	}
	// NOTE(review): the service error is deliberately ignored in this mock
	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
	// Deliver asynchronously, mimicking a network round trip
	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
	return nil
}
   419  
// Log retrieves the peer's own contextual logger. A fresh logger is created
// on every call, tagged with the peer id.
func (dlp *downloadTesterPeer) Log() log.Logger {
	return log.New("peer", dlp.id)
}
   424  
   425  // assertOwnChain checks if the local chain contains the correct number of items
   426  // of the various chain components.
   427  func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
   428  	// Mark this method as a helper to report errors at callsite, not in here
   429  	t.Helper()
   430  
   431  	headers, blocks, receipts := length, length, length
   432  	if tester.downloader.getMode() == LightSync {
   433  		blocks, receipts = 1, 1
   434  	}
   435  	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
   436  		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
   437  	}
   438  	if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks {
   439  		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
   440  	}
   441  	if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts {
   442  		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
   443  	}
   444  }
   445  
   446  func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
   447  func TestCanonicalSynchronisation66Snap(t *testing.T)  { testCanonSync(t, eth.ETH66, SnapSync) }
   448  func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
   449  
   450  func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
   451  	tester := newTester()
   452  	defer tester.terminate()
   453  
   454  	// Create a small enough block chain to download
   455  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   456  	tester.newPeer("peer", protocol, chain.blocks[1:])
   457  
   458  	// Synchronise with the peer and make sure all relevant data was retrieved
   459  	if err := tester.sync("peer", nil, mode); err != nil {
   460  		t.Fatalf("failed to synchronise blocks: %v", err)
   461  	}
   462  	assertOwnChain(t, tester, len(chain.blocks))
   463  }
   464  
// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := len(testChainBase.blocks) - 1
	tester.newPeer("peer", protocol, testChainBase.blocks[1:])

	// Wrap the importer to allow stepping: the hook blocks the import goroutine
	// until the test releases it via the proceed channel
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			// Take all three locks in a fixed order so the cached/frozen/
			// retrieved triple is observed as one consistent snapshot
			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			// Stop polling once the cache is full (modulo the reorg protection
			// delay) or everything outstanding is accounted for
			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
   542  
   543  // Tests that simple synchronization against a forked chain works correctly. In
   544  // this test common ancestor lookup should *not* be short circuited, and a full
   545  // binary search should be executed.
   546  func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
   547  func TestForkedSync66Snap(t *testing.T)  { testForkedSync(t, eth.ETH66, SnapSync) }
   548  func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
   549  
   550  func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   551  	tester := newTester()
   552  	defer tester.terminate()
   553  
   554  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
   555  	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
   556  	tester.newPeer("fork A", protocol, chainA.blocks[1:])
   557  	tester.newPeer("fork B", protocol, chainB.blocks[1:])
   558  	// Synchronise with the peer and make sure all blocks were retrieved
   559  	if err := tester.sync("fork A", nil, mode); err != nil {
   560  		t.Fatalf("failed to synchronise blocks: %v", err)
   561  	}
   562  	assertOwnChain(t, tester, len(chainA.blocks))
   563  
   564  	// Synchronise with the second peer and make sure that fork is pulled too
   565  	if err := tester.sync("fork B", nil, mode); err != nil {
   566  		t.Fatalf("failed to synchronise blocks: %v", err)
   567  	}
   568  	assertOwnChain(t, tester, len(chainB.blocks))
   569  }
   570  
   571  // Tests that synchronising against a much shorter but much heavyer fork works
   572  // corrently and is not dropped.
   573  func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
   574  func TestHeavyForkedSync66Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
   575  func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
   576  
   577  func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   578  	tester := newTester()
   579  	defer tester.terminate()
   580  
   581  	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
   582  	chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
   583  	tester.newPeer("light", protocol, chainA.blocks[1:])
   584  	tester.newPeer("heavy", protocol, chainB.blocks[1:])
   585  
   586  	// Synchronise with the peer and make sure all blocks were retrieved
   587  	if err := tester.sync("light", nil, mode); err != nil {
   588  		t.Fatalf("failed to synchronise blocks: %v", err)
   589  	}
   590  	assertOwnChain(t, tester, len(chainA.blocks))
   591  
   592  	// Synchronise with the second peer and make sure that fork is pulled too
   593  	if err := tester.sync("heavy", nil, mode); err != nil {
   594  		t.Fatalf("failed to synchronise blocks: %v", err)
   595  	}
   596  	assertOwnChain(t, tester, len(chainB.blocks))
   597  }
   598  
   599  // Tests that chain forks are contained within a certain interval of the current
   600  // chain head, ensuring that malicious peers cannot waste resources by feeding
   601  // long dead chains.
   602  func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
   603  func TestBoundedForkedSync66Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
   604  func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
   605  
   606  func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   607  	tester := newTester()
   608  	defer tester.terminate()
   609  
   610  	chainA := testChainForkLightA
   611  	chainB := testChainForkLightB
   612  	tester.newPeer("original", protocol, chainA.blocks[1:])
   613  	tester.newPeer("rewriter", protocol, chainB.blocks[1:])
   614  
   615  	// Synchronise with the peer and make sure all blocks were retrieved
   616  	if err := tester.sync("original", nil, mode); err != nil {
   617  		t.Fatalf("failed to synchronise blocks: %v", err)
   618  	}
   619  	assertOwnChain(t, tester, len(chainA.blocks))
   620  
   621  	// Synchronise with the second peer and ensure that the fork is rejected to being too old
   622  	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
   623  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   624  	}
   625  }
   626  
   627  // Tests that chain forks are contained within a certain interval of the current
   628  // chain head for short but heavy forks too. These are a bit special because they
   629  // take different ancestor lookup paths.
   630  func TestBoundedHeavyForkedSync66Full(t *testing.T) {
   631  	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
   632  }
   633  func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
   634  	testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
   635  }
   636  
   637  func TestBoundedHeavyForkedSync66Light(t *testing.T) {
   638  	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
   639  }
   640  
   641  func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
   642  	tester := newTester()
   643  	defer tester.terminate()
   644  
   645  	// Create a long enough forked chain
   646  	chainA := testChainForkLightA
   647  	chainB := testChainForkHeavy
   648  	tester.newPeer("original", protocol, chainA.blocks[1:])
   649  
   650  	// Synchronise with the peer and make sure all blocks were retrieved
   651  	if err := tester.sync("original", nil, mode); err != nil {
   652  		t.Fatalf("failed to synchronise blocks: %v", err)
   653  	}
   654  	assertOwnChain(t, tester, len(chainA.blocks))
   655  
   656  	tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
   657  	// Synchronise with the second peer and ensure that the fork is rejected to being too old
   658  	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
   659  		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
   660  	}
   661  }
   662  
   663  // Tests that a canceled download wipes all previously accumulated state.
   664  func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
   665  func TestCancel66Snap(t *testing.T)  { testCancel(t, eth.ETH66, SnapSync) }
   666  func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
   667  
   668  func testCancel(t *testing.T, protocol uint, mode SyncMode) {
   669  	tester := newTester()
   670  	defer tester.terminate()
   671  
   672  	chain := testChainBase.shorten(MaxHeaderFetch)
   673  	tester.newPeer("peer", protocol, chain.blocks[1:])
   674  
   675  	// Make sure canceling works with a pristine downloader
   676  	tester.downloader.Cancel()
   677  	if !tester.downloader.queue.Idle() {
   678  		t.Errorf("download queue not idle")
   679  	}
   680  	// Synchronise with the peer, but cancel afterwards
   681  	if err := tester.sync("peer", nil, mode); err != nil {
   682  		t.Fatalf("failed to synchronise blocks: %v", err)
   683  	}
   684  	tester.downloader.Cancel()
   685  	if !tester.downloader.queue.Idle() {
   686  		t.Errorf("download queue not idle")
   687  	}
   688  }
   689  
   690  // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
   691  func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
   692  func TestMultiSynchronisation66Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
   693  func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
   694  
   695  func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
   696  	tester := newTester()
   697  	defer tester.terminate()
   698  
   699  	// Create various peers with various parts of the chain
   700  	targetPeers := 8
   701  	chain := testChainBase.shorten(targetPeers * 100)
   702  
   703  	for i := 0; i < targetPeers; i++ {
   704  		id := fmt.Sprintf("peer #%d", i)
   705  		tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:])
   706  	}
   707  	if err := tester.sync("peer #0", nil, mode); err != nil {
   708  		t.Fatalf("failed to synchronise blocks: %v", err)
   709  	}
   710  	assertOwnChain(t, tester, len(chain.blocks))
   711  }
   712  
// Tests that synchronisations behave well in multi-version protocol environments
// and not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, SnapSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type (only eth/66 is wired up at the moment)
	tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
	// tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) // re-enable when eth/67 support lands

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Check that no peers have been dropped off
	for _, version := range []int{66} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}
   744  
// Tests that if a block is empty (e.g. header only, no transactions or uncles),
// no body request should be made, and instead the header should be assembled
// into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
   750  
   751  func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
   752  	tester := newTester()
   753  	defer tester.terminate()
   754  
   755  	// Create a block chain to download
   756  	chain := testChainBase
   757  	tester.newPeer("peer", protocol, chain.blocks[1:])
   758  
   759  	// Instrument the downloader to signal body requests
   760  	bodiesHave, receiptsHave := int32(0), int32(0)
   761  	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
   762  		atomic.AddInt32(&bodiesHave, int32(len(headers)))
   763  	}
   764  	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
   765  		atomic.AddInt32(&receiptsHave, int32(len(headers)))
   766  	}
   767  	// Synchronise with the peer and make sure all blocks were retrieved
   768  	if err := tester.sync("peer", nil, mode); err != nil {
   769  		t.Fatalf("failed to synchronise blocks: %v", err)
   770  	}
   771  	assertOwnChain(t, tester, len(chain.blocks))
   772  
   773  	// Validate the number of block bodies that should have been requested
   774  	bodiesNeeded, receiptsNeeded := 0, 0
   775  	for _, block := range chain.blocks[1:] {
   776  		if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
   777  			bodiesNeeded++
   778  		}
   779  	}
   780  	for _, block := range chain.blocks[1:] {
   781  		if mode == SnapSync && len(block.Transactions()) > 0 {
   782  			receiptsNeeded++
   783  		}
   784  	}
   785  	if int(bodiesHave) != bodiesNeeded {
   786  		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
   787  	}
   788  	if int(receiptsHave) != receiptsNeeded {
   789  		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
   790  	}
   791  }
   792  
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped (non-contiguous) header chains.
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
   798  
   799  func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   800  	tester := newTester()
   801  	defer tester.terminate()
   802  
   803  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   804  
   805  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
   806  	attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}
   807  
   808  	if err := tester.sync("attack", nil, mode); err == nil {
   809  		t.Fatalf("succeeded attacker synchronisation")
   810  	}
   811  	// Synchronise with the valid peer and make sure sync succeeds
   812  	tester.newPeer("valid", protocol, chain.blocks[1:])
   813  	if err := tester.sync("valid", nil, mode); err != nil {
   814  		t.Fatalf("failed to synchronise blocks: %v", err)
   815  	}
   816  	assertOwnChain(t, tester, len(chain.blocks))
   817  }
   818  
// Tests that if requested headers are shifted (i.e. the first one is missing),
// the queue detects the invalid numbering and rejects the batch.
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
   824  
   825  func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
   826  	tester := newTester()
   827  	defer tester.terminate()
   828  
   829  	chain := testChainBase.shorten(blockCacheMaxItems - 15)
   830  
   831  	// Attempt a full sync with an attacker feeding shifted headers
   832  	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
   833  	attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}
   834  
   835  	if err := tester.sync("attack", nil, mode); err == nil {
   836  		t.Fatalf("succeeded attacker synchronisation")
   837  	}
   838  	// Synchronise with the valid peer and make sure sync succeeds
   839  	tester.newPeer("valid", protocol, chain.blocks[1:])
   840  	if err := tester.sync("valid", nil, mode); err != nil {
   841  		t.Fatalf("failed to synchronise blocks: %v", err)
   842  	}
   843  	assertOwnChain(t, tester, len(chain.blocks))
   844  }
   845  
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
// NOTE(review): only snap sync is exercised here — presumably header rollback
// is specific to that mode; confirm against testInvalidHeaderRollback.
func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) }
   850  
// testInvalidHeaderRollback runs three successive attack scenarios (junk during
// fast sync, junk during block import, withheld post-pivot blocks), asserting
// after each that the header head was rolled back far enough and — for snap
// sync — that the pivot block was reverted. A final sync against an honest
// peer must then succeed from the clean state.
func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1

	fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	// After the rollback the header head must not sit past the first full batch
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

	blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
	blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		// The pivot block itself must have been reverted too
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])

	// Install the withholding only once the sync started, so the attacker's
	// initial chain advertisement still looks complete; the hook disarms itself.
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < len(chain.blocks); i++ {
			withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}
   924  
// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes (a starvation attack).
func TestHighTDStarvationAttack66Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Snap(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
}

func TestHighTDStarvationAttack66Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}
   937  
   938  func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
   939  	tester := newTester()
   940  	defer tester.terminate()
   941  
   942  	chain := testChainBase.shorten(1)
   943  	tester.newPeer("attack", protocol, chain.blocks[1:])
   944  	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
   945  		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
   946  	}
   947  }
   948  
// Tests that misbehaving peers are disconnected, whilst behaving ones are not,
// based on the specific synchronisation error they produce.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
   951  
// testBlockHeaderAttackerDropping injects each known sync error through a
// mocked synchroniser and asserts whether the offending peer is dropped or
// kept, per the table below.
func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error // error the mocked synchroniser will return
		drop   bool  // whether the peer must be dropped as a consequence
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		tester.newPeer(id, protocol, chain.blocks[1:])
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		// The returned error is deliberately ignored: only the peer-drop side
		// effect of the injected error is under test here.
		tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}
   995  
// Tests that synchronisation progress (origin block number, current block
// number and highest block number) is tracked and updated correctly.
func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Snap(t *testing.T)  { testSyncProgress(t, eth.ETH66, SnapSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
  1001  
// testSyncProgress drives two consecutive syncs (half chain, then full chain)
// and checks the reported progress at each stage. The syncInitHook blocks the
// sync mid-flight (handshaking over the starting/progress channels) so that
// progress can be inspected at a deterministic point.
func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{} // signal the test that sync has started
		<-progress             // stall until the test has inspected progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks)/2 - 1),
	})
	progress <- struct{}{} // unblock the stalled sync
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks)/2 - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks) - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})
}
  1061  
  1062  func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
  1063  	// Mark this method as a helper to report errors at callsite, not in here
  1064  	t.Helper()
  1065  
  1066  	p := d.Progress()
  1067  	if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
  1068  		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
  1069  	}
  1070  }
  1071  
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or a manual head
// revertal).
func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
func TestForkedSyncProgress66Light(t *testing.T) {
	testForkedSyncProgress(t, eth.ETH66, LightSync)
}
  1078  
// testForkedSyncProgress syncs fork A, then fork B, and verifies that the
// progress origin resets to the common ancestor when switching forks. The
// syncInitHook stalls each sync so progress can be checked deterministically.
func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{} // signal the test that sync has started
		<-progress             // stall until the test has inspected progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chainA.blocks) - 1),
	})
	progress <- struct{}{} // unblock the stalled sync
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	// The origin must fall back to the fork point (last common block)
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainA.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainB.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})
}
  1142  
// Tests that if synchronisation is aborted due to some failure, then the
// progress origin is not updated in the next sync cycle, as it should be
// considered the continuation of the previous sync and not a new instance.
func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
  1149  
// testFailedSyncProgress runs a sync against a peer that withholds the mid-chain
// header (so sync fails), then re-syncs against an honest peer and verifies the
// progress origin is carried over from the failed attempt rather than reset.
func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{} // signal the test that sync has started
		<-progress             // stall until the test has inspected progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	missing := len(chain.blocks)/2 - 1

	faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
	faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{} // unblock the stalled sync
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains
	// the same after a failure
	tester.newPeer("valid", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(chain.blocks) - 1),
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
}
  1209  
// Tests that if an attacker fakes a chain height, then after the attack is
// detected, the progress height is successfully reduced at the next sync
// invocation.
func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
  1215  
// testFakedSyncProgress syncs against an attacker that advertises the full
// chain but withholds the last few blocks, then re-syncs against an honest
// peer serving the true (shorter) chain and checks the reported highest block
// is reduced accordingly.
func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{} // signal the test that sync has started
		<-progress             // stall until the test has inspected progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	numMissing := 5
	for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
		attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
	}
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{} // unblock the stalled sync
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(len(chain.blocks) - numMissing)
	tester.newPeer("valid", protocol, validChain.blocks[1:])
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(validChain.blocks) - 1),
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
}
  1278  
  1279  func TestRemoteHeaderRequestSpan(t *testing.T) {
  1280  	testCases := []struct {
  1281  		remoteHeight uint64
  1282  		localHeight  uint64
  1283  		expected     []int
  1284  	}{
  1285  		// Remote is way higher. We should ask for the remote head and go backwards
  1286  		{
  1287  			1500, 1000,
  1288  			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
  1289  		},
  1290  		{
  1291  			15000, 13006,
  1292  			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
  1293  		},
  1294  		// Remote is pretty close to us. We don't have to fetch as many
  1295  		{
  1296  			1200, 1150,
  1297  			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
  1298  		},
  1299  		// Remote is equal to us (so on a fork with higher td)
  1300  		// We should get the closest couple of ancestors
  1301  		{
  1302  			1500, 1500,
  1303  			[]int{1497, 1499},
  1304  		},
  1305  		// We're higher than the remote! Odd
  1306  		{
  1307  			1000, 1500,
  1308  			[]int{997, 999},
  1309  		},
  1310  		// Check some weird edgecases that it behaves somewhat rationally
  1311  		{
  1312  			0, 1500,
  1313  			[]int{0, 2},
  1314  		},
  1315  		{
  1316  			6000000, 0,
  1317  			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
  1318  		},
  1319  		{
  1320  			0, 0,
  1321  			[]int{0, 2},
  1322  		},
  1323  	}
  1324  
  1325  	reqs := func(from, count, span int) []int {
  1326  		var r []int
  1327  		num := from
  1328  		for len(r) < count {
  1329  			r = append(r, num)
  1330  			num += span + 1
  1331  		}
  1332  		return r
  1333  	}
  1334  
  1335  	for i, tt := range testCases {
  1336  		i := i
  1337  		tt := tt
  1338  
  1339  		t.Run("", func(t *testing.T) {
  1340  			from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
  1341  			data := reqs(int(from), count, span)
  1342  
  1343  			if max != uint64(data[len(data)-1]) {
  1344  				t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
  1345  			}
  1346  			failed := false
  1347  			if len(data) != len(tt.expected) {
  1348  				failed = true
  1349  				t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
  1350  			} else {
  1351  				for j, n := range data {
  1352  					if n != tt.expected[j] {
  1353  						failed = true
  1354  						break
  1355  					}
  1356  				}
  1357  			}
  1358  
  1359  			if failed {
  1360  				res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
  1361  				exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
  1362  				t.Logf("got: %v\n", res)
  1363  				t.Logf("exp: %v\n", exp)
  1364  				t.Errorf("test %d: wrong values", i)
  1365  			}
  1366  		})
  1367  	}
  1368  }
  1369  
// Tests that peers whose advertised head is below a pre-configured checkpoint
// block are prevented from being fast-synced from, avoiding potential cheap
// eclipse attacks.
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, SnapSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH66, LightSync)
}
  1377  
// testCheckpointEnforcement configures a hard-coded checkpoint above the test
// chain's head and verifies that snap/light sync refuse the unsynced peer
// (errUnsyncedPeer) while full sync proceeds regardless of the checkpoint.
func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
	// Create a new tester with a particular hard coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Only snap and light sync enforce the checkpoint; full sync expects no error
	var expect error
	if mode == SnapSync || mode == LightSync {
		expect = errUnsyncedPeer
	}

	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}

	if mode == SnapSync || mode == LightSync {
		// Rejected sync: nothing beyond the genesis block must be present
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, len(chain.blocks))
	}
}
  1404  
// whitelistFake is a mock for the chain validator service, driving its
// per-call behaviour through a user-supplied validate callback.
type whitelistFake struct {
	// count denotes the number of times the validate function was called
	count int

	// validate is the dynamic function to be called while syncing; it receives
	// the current invocation count so tests can vary behaviour per call
	validate func(count int) (bool, error)
}
  1413  
  1414  // newWhitelistFake returns a new mock whitelist
  1415  func newWhitelistFake(validate func(count int) (bool, error)) *whitelistFake {
  1416  	return &whitelistFake{0, validate}
  1417  }
  1418  
  1419  // IsValidPeer is the mock function which the downloader will use to validate the chain
  1420  // to be received from a peer.
  1421  func (w *whitelistFake) IsValidPeer(_ *types.Header, _ func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) {
  1422  	defer func() {
  1423  		w.count++
  1424  	}()
  1425  
  1426  	return w.validate(w.count)
  1427  }
  1428  
// IsValidChain is a mock that accepts every chain as valid.
func (w *whitelistFake) IsValidChain(current *types.Header, headers []*types.Header) (bool, error) {
	return true, nil
}
// ProcessCheckpoint is a no-op mock.
func (w *whitelistFake) ProcessCheckpoint(_ uint64, _ common.Hash) {}

// GetCheckpointWhitelist is a mock that reports no whitelisted checkpoints.
func (w *whitelistFake) GetCheckpointWhitelist() map[uint64]common.Hash {
	return nil
}
// PurgeCheckpointWhitelist is a no-op mock.
func (w *whitelistFake) PurgeCheckpointWhitelist() {}
// GetCheckpoints is a mock that returns an empty checkpoint set.
func (w *whitelistFake) GetCheckpoints(current, sidechainHeader *types.Header, sidechainCheckpoints []*types.Header) (map[uint64]*types.Header, error) {
	return map[uint64]*types.Header{}, nil
}
  1441  
  1442  // TestFakedSyncProgress66WhitelistMismatch tests if in case of whitelisted
  1443  // checkpoint mismatch with opposite peer, the sync should fail.
  1444  func TestFakedSyncProgress66WhitelistMismatch(t *testing.T) {
  1445  	t.Parallel()
  1446  
  1447  	protocol := uint(eth.ETH66)
  1448  	mode := FullSync
  1449  
  1450  	tester := newTester()
  1451  	validate := func(count int) (bool, error) {
  1452  		return false, whitelist.ErrCheckpointMismatch
  1453  	}
  1454  	tester.downloader.ChainValidator = newWhitelistFake(validate)
  1455  
  1456  	defer tester.terminate()
  1457  
  1458  	chainA := testChainForkLightA.blocks
  1459  	tester.newPeer("light", protocol, chainA[1:])
  1460  
  1461  	// Synchronise with the peer and make sure all blocks were retrieved
  1462  	if err := tester.sync("light", nil, mode); err == nil {
  1463  		t.Fatal("succeeded attacker synchronisation")
  1464  	}
  1465  }
  1466  
  1467  // TestFakedSyncProgress66WhitelistMatch tests if in case of whitelisted
  1468  // checkpoint match with opposite peer, the sync should succeed.
  1469  func TestFakedSyncProgress66WhitelistMatch(t *testing.T) {
  1470  	t.Parallel()
  1471  
  1472  	protocol := uint(eth.ETH66)
  1473  	mode := FullSync
  1474  
  1475  	tester := newTester()
  1476  	validate := func(count int) (bool, error) {
  1477  		return true, nil
  1478  	}
  1479  	tester.downloader.ChainValidator = newWhitelistFake(validate)
  1480  
  1481  	defer tester.terminate()
  1482  
  1483  	chainA := testChainForkLightA.blocks
  1484  	tester.newPeer("light", protocol, chainA[1:])
  1485  
  1486  	// Synchronise with the peer and make sure all blocks were retrieved
  1487  	if err := tester.sync("light", nil, mode); err != nil {
  1488  		t.Fatal("succeeded attacker synchronisation")
  1489  	}
  1490  }
  1491  
  1492  // TestFakedSyncProgress66NoRemoteCheckpoint tests if in case of missing/invalid
  1493  // checkpointed blocks with opposite peer, the sync should fail initially but
  1494  // with the retry mechanism, it should succeed eventually.
  1495  func TestFakedSyncProgress66NoRemoteCheckpoint(t *testing.T) {
  1496  	t.Parallel()
  1497  
  1498  	protocol := uint(eth.ETH66)
  1499  	mode := FullSync
  1500  
  1501  	tester := newTester()
  1502  	validate := func(count int) (bool, error) {
  1503  		// only return the `ErrNoRemoteCheckpoint` error for the first call
  1504  		if count == 0 {
  1505  			return false, whitelist.ErrNoRemoteCheckpoint
  1506  		}
  1507  
  1508  		return true, nil
  1509  	}
  1510  
  1511  	tester.downloader.ChainValidator = newWhitelistFake(validate)
  1512  
  1513  	defer tester.terminate()
  1514  
  1515  	chainA := testChainForkLightA.blocks
  1516  	tester.newPeer("light", protocol, chainA[1:])
  1517  
  1518  	// Synchronise with the peer and make sure all blocks were retrieved
  1519  	// Should fail in first attempt
  1520  	if err := tester.sync("light", nil, mode); err != nil {
  1521  		assert.Equal(t, whitelist.ErrNoRemoteCheckpoint, err, "failed synchronisation")
  1522  	}
  1523  
  1524  	// Try syncing again, should succeed
  1525  	if err := tester.sync("light", nil, mode); err != nil {
  1526  		t.Fatal("succeeded attacker synchronisation")
  1527  	}
  1528  }