github.com/ethereum/go-ethereum@v1.16.1/eth/downloader/skeleton_test.go (about)

     1  // Copyright 2022 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum/common"
    29  	"github.com/ethereum/go-ethereum/core/rawdb"
    30  	"github.com/ethereum/go-ethereum/core/types"
    31  	"github.com/ethereum/go-ethereum/eth/protocols/eth"
    32  	"github.com/ethereum/go-ethereum/ethdb"
    33  	"github.com/ethereum/go-ethereum/log"
    34  )
    35  
// hookedBackfiller is a tester backfiller with all interface methods mocked and
// hooked so tests can implement only the things they need.
//
// The zero value is a valid no-op backfiller: with both hooks left nil, suspend
// and resume do nothing.
type hookedBackfiller struct {
	// suspendHook is an optional hook to be called when the filler is requested
	// to be suspended.
	suspendHook func() *types.Header

	// resumeHook is an optional hook to be called when the filler is requested
	// to be resumed.
	resumeHook func()
}
    47  
    48  // newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
    49  // essentially acting as a noop.
    50  func newHookedBackfiller() backfiller {
    51  	return new(hookedBackfiller)
    52  }
    53  
    54  // suspend requests the backfiller to abort any running full or snap sync
    55  // based on the skeleton chain as it might be invalid. The backfiller should
    56  // gracefully handle multiple consecutive suspends without a resume, even
    57  // on initial startup.
    58  func (hf *hookedBackfiller) suspend() *types.Header {
    59  	if hf.suspendHook != nil {
    60  		return hf.suspendHook()
    61  	}
    62  	return nil // we don't really care about header cleanups for now
    63  }
    64  
    65  // resume requests the backfiller to start running fill or snap sync based on
    66  // the skeleton chain as it has successfully been linked. Appending new heads
    67  // to the end of the chain will not result in suspend/resume cycles.
    68  func (hf *hookedBackfiller) resume() {
    69  	if hf.resumeHook != nil {
    70  		hf.resumeHook()
    71  	}
    72  }
    73  
// skeletonTestPeer is a mock peer that can only serve header requests from a
// pre-prepared header chain (which may be arbitrarily wrong for testing).
//
// Requesting anything else from these peers will hard panic. Note, do *not*
// implement any other methods. We actually want to make sure that the skeleton
// syncer only depends on - and will only ever do so - on header requests.
type skeletonTestPeer struct {
	id      string          // Unique identifier of the mock peer
	headers []*types.Header // Headers to serve when requested

	serve func(origin uint64) []*types.Header // Hook to allow custom responses

	served  atomic.Uint64 // Number of headers served by this peer
	dropped atomic.Uint64 // Counter used as a flag: non-zero means the peer was dropped (stop responding)
}
    89  
    90  // newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
    91  func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
    92  	return &skeletonTestPeer{
    93  		id:      id,
    94  		headers: headers,
    95  	}
    96  }
    97  
    98  // newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync with,
    99  // and sets an optional serve hook that can return headers for delivery instead
   100  // of the predefined chain. Useful for emulating malicious behavior that would
   101  // otherwise require dedicated peer types.
   102  func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
   103  	return &skeletonTestPeer{
   104  		id:      id,
   105  		headers: headers,
   106  		serve:   serve,
   107  	}
   108  }
   109  
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
//
// Delivery is asynchronous: the response is pushed into sink from a goroutine,
// and if the downloader rejects it (non-nil error on Done), the peer marks
// itself dropped so subsequent requests error out.
func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Since skeleton test peers are in-memory mocks, dropping them does not make
	// them inaccessible. As such, check a local `dropped` field to see if the
	// peer has been dropped and should not respond any more.
	if p.dropped.Load() != 0 {
		return nil, errors.New("peer already dropped")
	}
	// Skeleton sync retrieves batches of headers going backward without gaps.
	// This ensures we can follow a clean parent progression without any reorg
	// hiccups. There is no need for any other type of header retrieval, so do
	// panic if there's such a request.
	if !reverse || skip != 0 {
		// Note, if other clients want to do these kinds of requests, it's their
		// problem, it will still work. We just don't want *us* making complicated
		// requests without a very strong reason to.
		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
	}
	// If the skeleton syncer requests the genesis block, panic. Whilst it could
	// be considered a valid request, our code specifically should not request it
	// ever since we want to link up headers to an existing local chain, which at
	// worse will be the genesis.
	if int64(origin)-int64(amount) < 0 {
		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
	}
	// To make concurrency easier, the skeleton syncer always requests fixed size
	// batches of headers. Panic if the peer is requested an amount other than the
	// configured batch size (apart from the request leading to the genesis).
	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
	}
	// Simple reverse header retrieval. Fill from the peer's chain and return.
	// If the tester has a serve hook set, try to use that before falling back
	// to the default behavior.
	var headers []*types.Header
	if p.serve != nil {
		headers = p.serve(origin)
	}
	if headers == nil {
		headers = make([]*types.Header, 0, amount)
		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
			for i := 0; i < amount; i++ {
				// Consider nil headers as a form of attack and withhold them. Nil
				// cannot be decoded from RLP, so it's not possible to produce an
				// attack by sending/receiving those over eth.
				header := p.headers[int(origin)-i]
				if header == nil {
					continue
				}
				headers = append(headers, header)
			}
		}
	}
	// Track the total number of headers handed out for test expectations.
	p.served.Add(uint64(len(headers)))

	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: p.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersRequest)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error),
	}
	go func() {
		sink <- res
		// A rejected delivery flags this peer as dropped; further requests
		// from the syncer will be refused above.
		if err := <-res.Done; err != nil {
			log.Warn("Skeleton test peer response rejected", "err", err)
			p.dropped.Add(1)
		}
	}()
	return req, nil
}
   191  
// Head is deliberately unimplemented: skeleton sync must never need the
// remote peer's head, so any call is a test failure.
func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
	panic("skeleton sync must not request the remote head")
}
   195  
// RequestHeadersByHash is deliberately unimplemented: skeleton sync only uses
// number-based header retrieval, so any hash-based request is a test failure.
func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request headers by hash")
}
   199  
// RequestBodies is deliberately unimplemented: skeleton sync only retrieves
// headers, so any body request is a test failure.
func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request block bodies")
}
   203  
// RequestReceipts is deliberately unimplemented: skeleton sync only retrieves
// headers, so any receipt request is a test failure.
func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request receipts")
}
   207  
// Tests various sync initializations based on previous leftovers in the database
// and announced heads.
//
// Each case seeds a fresh in-memory database with optional skeleton headers and
// a serialized old sync status, announces a new head, and checks that the
// skeleton reconciles the subchains into the expected new state.
func TestSkeletonSyncInit(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")} // sibling of block49 with a different hash
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
	)
	tests := []struct {
		headers  []*types.Header // Database content (beside the genesis)
		oldstate []*subchain     // Old sync state with various interrupted subchains
		head     *types.Header   // New head header to announce to reorg to
		newstate []*subchain     // Expected sync state after the reorg
	}{
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head.
		{
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Empty database with only the genesis set with a leftover empty sync
		// progress. This is a synthetic case, just for the sake of covering things.
		{
			oldstate: []*subchain{},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// A single leftover subchain is present, older than the new head. The
		// old subchain should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{{Head: 10, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, older than the new head. The
		// old subchains should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present, newer than the new head. The
		// newer subchain should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{{Head: 65, Tail: 60}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Multiple leftover subchain is present, newer than the new head. The
		// newer subchains should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
			},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},

		// Two leftover subchains are present, one fully older and one fully
		// newer than the announced head. The head should delete the newer one,
		// keeping the older one.
		{
			oldstate: []*subchain{
				{Head: 65, Tail: 60},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, some fully older and some
		// fully newer than the announced head. The head should delete the newer
		// ones, keeping the older ones.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present and the new head is extending
		// it with one more header. We expect the subchain head to be pushed
		// forward.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present and although the new head does
		// extend it number wise, the hash chain does not link up. We expect a
		// new subchain to be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, correctly anchoring into an existing
		// header. We expect the old subchain to be truncated and extended with
		// the new head.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, but does not anchor into an existing
		// header. We expect the old subchain to be truncated and a new chain
		// be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()

		rawdb.WriteHeader(db, genesis)
		for _, header := range tt.headers {
			rawdb.WriteSkeletonHeader(db, header)
		}
		if tt.oldstate != nil {
			blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
			rawdb.WriteSkeletonSyncStatus(db, blob)
		}
		// Create a skeleton sync and run a cycle; syncStarting closes the wait
		// channel so the test can block until initialization has completed.
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, nil, true)

		<-wait
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		expect := skeletonExpect{state: tt.newstate}
		if err := checkSkeletonProgress(db, false, nil, expect); err != nil {
			t.Errorf("test %d: %v", i, err)
		}
	}
}
   386  
// Tests that a running skeleton sync can be extended with properly linked up
// headers but not with side chains.
//
// Each case starts a sync at one head, then announces a second head as a
// non-forcing extension and checks both the returned error and the resulting
// subchain state.
func TestSkeletonSyncExtend(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")} // sibling of block49 with a different hash
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
		block51  = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
	)
	tests := []struct {
		head     *types.Header // New head header to announce to reorg to
		extend   *types.Header // New head header to announce to extend with
		newstate []*subchain   // Expected sync state after the reorg
		err      error         // Whether extension succeeds or not
	}{
		// Initialize a sync and try to extend it with a subsequent block.
		{
			head:   block49,
			extend: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with the existing head block.
		{
			head:   block49,
			extend: block49,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with a sibling block.
		{
			head:   block49,
			extend: block49B,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errChainReorged,
		},
		// Initialize a sync and try to extend it with a number-wise sequential
		// header, but a hash wise non-linking one.
		{
			head:   block49B,
			extend: block50,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errChainForked,
		},
		// Initialize a sync and try to extend it with a non-linking future block.
		{
			head:   block49,
			extend: block51,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errChainGapped,
		},
		// Initialize a sync and try to extend it with a past canonical block.
		{
			head:   block50,
			extend: block49,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errChainReorged,
		},
		// Initialize a sync and try to extend it with a past sidechain block.
		{
			head:   block50,
			extend: block49B,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errChainReorged,
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()
		rawdb.WriteHeader(db, genesis)

		// Create a skeleton sync and run a cycle; syncStarting closes the wait
		// channel so the test can block until initialization has completed.
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, nil, true)

		<-wait
		// Announce the extension head without forcing, expecting the per-case error.
		if err := skeleton.Sync(tt.extend, nil, false); !errors.Is(err, tt.err) {
			t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
		}
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		expect := skeletonExpect{state: tt.newstate}
		if err := checkSkeletonProgress(db, false, nil, expect); err != nil {
			t.Errorf("test %d: %v", i, err)
		}
	}
}
   492  
// skeletonExpect bundles the assertions made against a skeleton sync at a
// checkpoint (after the initial cycle or after a post-init event).
type skeletonExpect struct {
	state []*subchain // Expected sync state after the post-init event
	serve uint64      // Expected number of header retrievals after initial cycle
	drop  uint64      // Expected number of peers dropped after initial cycle
}
   498  
// skeletonTest describes one retrieval scenario: an initial head and peer set
// with mid-sync expectations, followed by an optional new head/peer event with
// end-of-sync expectations.
type skeletonTest struct {
	fill          bool // Whether to run a real backfiller in this test case
	unpredictable bool // Whether to ignore drops/serves due to uncertain packet assignments

	head  *types.Header       // New head header to announce to reorg to
	peers []*skeletonTestPeer // Initial peer set to start the sync with
	mid   skeletonExpect      // Expectations checked after the initial sync cycle

	newHead *types.Header     // New header to announce on top of the old one
	newPeer *skeletonTestPeer // New peer to join the skeleton syncer
	end     skeletonExpect    // Expectations checked after the new head/peer event
}
   511  
   512  // Tests that the skeleton sync correctly retrieves headers from one or more
   513  // peers without duplicates or other strange side effects.
   514  func TestSkeletonSyncRetrievals(t *testing.T) {
   515  	//log.SetDefault(log.NewLogger(log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))))
   516  
   517  	// Since skeleton headers don't need to be meaningful, beyond a parent hash
   518  	// progression, create a long fake chain to test with.
   519  	chain := []*types.Header{{Number: big.NewInt(0)}}
   520  	for i := 1; i < 10000; i++ {
   521  		chain = append(chain, &types.Header{
   522  			ParentHash: chain[i-1].Hash(),
   523  			Number:     big.NewInt(int64(i)),
   524  		})
   525  	}
   526  	// Some tests require a forking side chain to trigger cornercases.
   527  	var sidechain []*types.Header
   528  	for i := 0; i < len(chain)/2; i++ { // Fork at block #5000
   529  		sidechain = append(sidechain, chain[i])
   530  	}
   531  	for i := len(chain) / 2; i < len(chain); i++ {
   532  		sidechain = append(sidechain, &types.Header{
   533  			ParentHash: sidechain[i-1].Hash(),
   534  			Number:     big.NewInt(int64(i)),
   535  			Extra:      []byte("B"), // force a different hash
   536  		})
   537  	}
   538  	tests := []skeletonTest{
   539  		// Completely empty database with only the genesis set. The sync is expected
   540  		// to create a single subchain with the requested head. No peers however, so
   541  		// the sync should be stuck without any progression.
   542  		//
   543  		// When a new peer is added, it should detect the join and fill the headers
   544  		// to the genesis block.
   545  		{
   546  			head: chain[len(chain)-1],
   547  			mid: skeletonExpect{
   548  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},
   549  			},
   550  
   551  			newPeer: newSkeletonTestPeer("test-peer", chain),
   552  			end: skeletonExpect{
   553  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   554  				serve: uint64(len(chain) - 2), // len - head - genesis
   555  			},
   556  		},
   557  		// Completely empty database with only the genesis set. The sync is expected
   558  		// to create a single subchain with the requested head. With one valid peer,
   559  		// the sync is expected to complete already in the initial round.
   560  		//
   561  		// Adding a second peer should not have any effect.
   562  		{
   563  			head:  chain[len(chain)-1],
   564  			peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
   565  			mid: skeletonExpect{
   566  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   567  				serve: uint64(len(chain) - 2), // len - head - genesis
   568  			},
   569  
   570  			newPeer: newSkeletonTestPeer("test-peer-2", chain),
   571  			end: skeletonExpect{
   572  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   573  				serve: uint64(len(chain) - 2), // len - head - genesis
   574  			},
   575  		},
   576  		// Completely empty database with only the genesis set. The sync is expected
   577  		// to create a single subchain with the requested head. With many valid peers,
   578  		// the sync is expected to complete already in the initial round.
   579  		//
   580  		// Adding a new peer should not have any effect.
   581  		{
   582  			head: chain[len(chain)-1],
   583  			peers: []*skeletonTestPeer{
   584  				newSkeletonTestPeer("test-peer-1", chain),
   585  				newSkeletonTestPeer("test-peer-2", chain),
   586  				newSkeletonTestPeer("test-peer-3", chain),
   587  			},
   588  			mid: skeletonExpect{
   589  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   590  				serve: uint64(len(chain) - 2), // len - head - genesis
   591  			},
   592  
   593  			newPeer: newSkeletonTestPeer("test-peer-4", chain),
   594  			end: skeletonExpect{
   595  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   596  				serve: uint64(len(chain) - 2), // len - head - genesis
   597  			},
   598  		},
   599  		// This test checks if a peer tries to withhold a header - *on* the sync
   600  		// boundary - instead of sending the requested amount. The malicious short
   601  		// package should not be accepted.
   602  		//
   603  		// Joining with a new peer should however unblock the sync.
   604  		{
   605  			head: chain[requestHeaders+100],
   606  			peers: []*skeletonTestPeer{
   607  				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
   608  			},
   609  			mid: skeletonExpect{
   610  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   611  				serve: requestHeaders + 101 - 3, // len - head - genesis - missing
   612  				drop:  1,                        // penalize shortened header deliveries
   613  			},
   614  
   615  			newPeer: newSkeletonTestPeer("good-peer", chain),
   616  			end: skeletonExpect{
   617  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   618  				serve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
   619  				drop:  1,                                      // no new drops
   620  			},
   621  		},
   622  		// This test checks if a peer tries to withhold a header - *off* the sync
   623  		// boundary - instead of sending the requested amount. The malicious short
   624  		// package should not be accepted.
   625  		//
   626  		// Joining with a new peer should however unblock the sync.
   627  		{
   628  			head: chain[requestHeaders+100],
   629  			peers: []*skeletonTestPeer{
   630  				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
   631  			},
   632  			mid: skeletonExpect{
   633  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   634  				serve: requestHeaders + 101 - 3, // len - head - genesis - missing
   635  				drop:  1,                        // penalize shortened header deliveries
   636  			},
   637  
   638  			newPeer: newSkeletonTestPeer("good-peer", chain),
   639  			end: skeletonExpect{
   640  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   641  				serve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
   642  				drop:  1,                                      // no new drops
   643  			},
   644  		},
   645  		// This test checks if a peer tries to duplicate a header - *on* the sync
   646  		// boundary - instead of sending the correct sequence. The malicious duped
   647  		// package should not be accepted.
   648  		//
   649  		// Joining with a new peer should however unblock the sync.
   650  		{
   651  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   652  			peers: []*skeletonTestPeer{
   653  				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
   654  			},
   655  			mid: skeletonExpect{
   656  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   657  				serve: requestHeaders + 101 - 2, // len - head - genesis
   658  				drop:  1,                        // penalize invalid header sequences
   659  			},
   660  
   661  			newPeer: newSkeletonTestPeer("good-peer", chain),
   662  			end: skeletonExpect{
   663  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   664  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   665  				drop:  1,                                      // no new drops
   666  			},
   667  		},
   668  		// This test checks if a peer tries to duplicate a header - *off* the sync
   669  		// boundary - instead of sending the correct sequence. The malicious duped
   670  		// package should not be accepted.
   671  		//
   672  		// Joining with a new peer should however unblock the sync.
   673  		{
   674  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   675  			peers: []*skeletonTestPeer{
   676  				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
   677  			},
   678  			mid: skeletonExpect{
   679  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   680  				serve: requestHeaders + 101 - 2, // len - head - genesis
   681  				drop:  1,                        // penalize invalid header sequences
   682  			},
   683  
   684  			newPeer: newSkeletonTestPeer("good-peer", chain),
   685  			end: skeletonExpect{
   686  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   687  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   688  				drop:  1,                                      // no new drops
   689  			},
   690  		},
   691  		// This test checks if a peer tries to inject a different header - *on*
   692  		// the sync boundary - instead of sending the correct sequence. The bad
   693  		// package should not be accepted.
   694  		//
   695  		// Joining with a new peer should however unblock the sync.
   696  		{
   697  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   698  			peers: []*skeletonTestPeer{
   699  				newSkeletonTestPeer("header-changer",
   700  					append(
   701  						append(
   702  							append([]*types.Header{}, chain[:99]...),
   703  							&types.Header{
   704  								ParentHash: chain[98].Hash(),
   705  								Number:     big.NewInt(int64(99)),
   706  								GasLimit:   1,
   707  							},
   708  						), chain[100:]...,
   709  					),
   710  				),
   711  			},
   712  			mid: skeletonExpect{
   713  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   714  				serve: requestHeaders + 101 - 2, // len - head - genesis
   715  				drop:  1,                        // different set of headers, drop // TODO(karalabe): maybe just diff sync?
   716  			},
   717  
   718  			newPeer: newSkeletonTestPeer("good-peer", chain),
   719  			end: skeletonExpect{
   720  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   721  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   722  				drop:  1,                                      // no new drops
   723  			},
   724  		},
   725  		// This test checks if a peer tries to inject a different header - *off*
   726  		// the sync boundary - instead of sending the correct sequence. The bad
   727  		// package should not be accepted.
   728  		//
   729  		// Joining with a new peer should however unblock the sync.
   730  		{
   731  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   732  			peers: []*skeletonTestPeer{
   733  				newSkeletonTestPeer("header-changer",
   734  					append(
   735  						append(
   736  							append([]*types.Header{}, chain[:50]...),
   737  							&types.Header{
   738  								ParentHash: chain[49].Hash(),
   739  								Number:     big.NewInt(int64(50)),
   740  								GasLimit:   1,
   741  							},
   742  						), chain[51:]...,
   743  					),
   744  				),
   745  			},
   746  			mid: skeletonExpect{
   747  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   748  				serve: requestHeaders + 101 - 2, // len - head - genesis
   749  				drop:  1,                        // different set of headers, drop
   750  			},
   751  
   752  			newPeer: newSkeletonTestPeer("good-peer", chain),
   753  			end: skeletonExpect{
   754  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   755  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   756  				drop:  1,                                      // no new drops
   757  			},
   758  		},
   759  		// This test reproduces a bug caught during review (kudos to @holiman)
   760  		// where a subchain is merged with a previously interrupted one, causing
   761  		// pending data in the scratch space to become "invalid" (since we jump
   762  		// ahead during subchain merge). In that case it is expected to ignore
   763  		// the queued up data instead of trying to process on top of a shifted
   764  		// task set.
   765  		//
   766  		// The test is a bit convoluted since it needs to trigger a concurrency
   767  		// issue. First we sync up an initial chain of 2x512 items. Then announce
   768  		// 2x512+2 as head and delay delivering the head batch to fill the scratch
   769  		// space first. The delivery head should merge with the previous download
   770  		// and the scratch space must not be consumed further.
   771  		{
   772  			head: chain[2*requestHeaders],
   773  			peers: []*skeletonTestPeer{
   774  				newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
   775  					if origin == chain[2*requestHeaders+1].Number.Uint64() {
   776  						time.Sleep(100 * time.Millisecond)
   777  					}
   778  					return nil // Fallback to default behavior, just delayed
   779  				}),
   780  				newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
   781  					if origin == chain[2*requestHeaders+1].Number.Uint64() {
   782  						time.Sleep(100 * time.Millisecond)
   783  					}
   784  					return nil // Fallback to default behavior, just delayed
   785  				}),
   786  			},
   787  			mid: skeletonExpect{
   788  				state: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
   789  				serve: 2*requestHeaders - 1, // len - head - genesis
   790  			},
   791  
   792  			newHead: chain[2*requestHeaders+2],
   793  			end: skeletonExpect{
   794  				state: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
   795  				serve: 4 * requestHeaders,
   796  			},
   797  		},
   798  		// This test reproduces a bug caught by (@rjl493456442) where a skeleton
   799  		// header goes missing, causing the sync to get stuck and/or panic.
   800  		//
   801  		// The setup requires a previously successfully synced chain up to a block
   802  		// height N. That results is a single skeleton header (block N) and a single
   803  		// subchain (head N, Tail N) being stored on disk.
   804  		//
   805  		// The following step requires a new sync cycle to a new side chain of a
   806  		// height higher than N, and an ancestor lower than N (e.g. N-2, N+2).
   807  		// In this scenario, when processing a batch of headers, a link point of
   808  		// N-2 will be found, meaning that N-1 and N have been overwritten.
   809  		//
   810  		// The link event triggers an early exit, noticing that the previous sub-
		// chain is a leftover and deletes it (with its skeleton header N). But
   812  		// since skeleton header N has been overwritten to the new side chain, we
   813  		// end up losing it and creating a gap.
   814  		{
   815  			fill:          true,
   816  			unpredictable: true, // We have good and bad peer too, bad may be dropped, test too short for certainty
   817  
   818  			head:  chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2
   819  			peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)},
   820  			mid: skeletonExpect{
   821  				state: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}},
   822  			},
   823  
   824  			newHead: sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4
   825  			newPeer: newSkeletonTestPeer("test-peer-newchain", sidechain),
   826  			end: skeletonExpect{
   827  				state: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}},
   828  			},
   829  		},
   830  	}
   831  	for i, tt := range tests {
   832  		// Create a fresh database and initialize it with the starting state
   833  		db := rawdb.NewMemoryDatabase()
   834  
   835  		rawdb.WriteBlock(db, types.NewBlockWithHeader(chain[0]))
   836  		rawdb.WriteReceipts(db, chain[0].Hash(), chain[0].Number.Uint64(), types.Receipts{})
   837  
   838  		// Create a peer set to feed headers through
   839  		peerset := newPeerSet()
   840  		for _, peer := range tt.peers {
   841  			peerset.Register(newPeerConnection(peer.id, eth.ETH68, peer, log.New("id", peer.id)))
   842  		}
   843  		// Create a peer dropper to track malicious peers
   844  		dropped := make(map[string]int)
   845  		drop := func(peer string) {
   846  			if p := peerset.Peer(peer); p != nil {
   847  				p.peer.(*skeletonTestPeer).dropped.Add(1)
   848  			}
   849  			peerset.Unregister(peer)
   850  			dropped[peer]++
   851  		}
   852  		// Create a backfiller if we need to run more advanced tests
   853  		filler := newHookedBackfiller()
   854  		if tt.fill {
   855  			var filled *types.Header
   856  
   857  			filler = &hookedBackfiller{
   858  				resumeHook: func() {
   859  					var progress skeletonProgress
   860  					json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
   861  
   862  					for progress.Subchains[0].Tail < progress.Subchains[0].Head {
   863  						header := rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)
   864  
   865  						rawdb.WriteBlock(db, types.NewBlockWithHeader(header))
   866  						rawdb.WriteReceipts(db, header.Hash(), header.Number.Uint64(), types.Receipts{})
   867  
   868  						rawdb.DeleteSkeletonHeader(db, header.Number.Uint64())
   869  
   870  						progress.Subchains[0].Tail++
   871  						progress.Subchains[0].Next = header.Hash()
   872  					}
   873  					filled = rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)
   874  
   875  					rawdb.WriteBlock(db, types.NewBlockWithHeader(filled))
   876  					rawdb.WriteReceipts(db, filled.Hash(), filled.Number.Uint64(), types.Receipts{})
   877  				},
   878  
   879  				suspendHook: func() *types.Header {
   880  					prev := filled
   881  					filled = nil
   882  
   883  					return prev
   884  				},
   885  			}
   886  		}
   887  		// Create a skeleton sync and run a cycle
   888  		skeleton := newSkeleton(db, peerset, drop, filler)
   889  		skeleton.Sync(tt.head, nil, true)
   890  
   891  		// Wait a bit (bleah) for the initial sync loop to go to idle. This might
   892  		// be either a finish or a never-start hence why there's no event to hook.
   893  		waitStart := time.Now()
   894  		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
   895  			time.Sleep(waitTime)
   896  			if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err == nil {
   897  				break
   898  			}
   899  		}
   900  		if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err != nil {
   901  			t.Errorf("test %d, mid: %v", i, err)
   902  			continue
   903  		}
   904  
   905  		// Apply the post-init events if there's any
   906  		endpeers := tt.peers
   907  		if tt.newPeer != nil {
   908  			if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH68, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
   909  				t.Errorf("test %d: failed to register new peer: %v", i, err)
   910  			}
   911  			time.Sleep(time.Millisecond * 50) // given time for peer registration
   912  			endpeers = append(tt.peers, tt.newPeer)
   913  		}
   914  		if tt.newHead != nil {
   915  			skeleton.Sync(tt.newHead, nil, true)
   916  		}
   917  
   918  		// Wait a bit (bleah) for the second sync loop to go to idle. This might
   919  		// be either a finish or a never-start hence why there's no event to hook.
   920  		waitStart = time.Now()
   921  		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
   922  			time.Sleep(waitTime)
   923  			if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err == nil {
   924  				break
   925  			}
   926  		}
   927  		if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err != nil {
   928  			t.Errorf("test %d, end: %v", i, err)
   929  			continue
   930  		}
   931  		// Check that the peers served no more headers than we actually needed
   932  		// Clean up any leftover skeleton sync resources
   933  		skeleton.Terminate()
   934  	}
   935  }
   936  
   937  func checkSkeletonProgress(db ethdb.KeyValueReader, unpredictable bool, peers []*skeletonTestPeer, expected skeletonExpect) error {
   938  	var progress skeletonProgress
   939  	// Check the post-init end state if it matches the required results
   940  	json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
   941  
   942  	if len(progress.Subchains) != len(expected.state) {
   943  		return fmt.Errorf("subchain count mismatch: have %d, want %d", len(progress.Subchains), len(expected.state))
   944  	}
   945  	for j := 0; j < len(progress.Subchains); j++ {
   946  		if progress.Subchains[j].Head != expected.state[j].Head {
   947  			return fmt.Errorf("subchain %d head mismatch: have %d, want %d", j, progress.Subchains[j].Head, expected.state[j].Head)
   948  		}
   949  		if progress.Subchains[j].Tail != expected.state[j].Tail {
   950  			return fmt.Errorf("subchain %d tail mismatch: have %d, want %d", j, progress.Subchains[j].Tail, expected.state[j].Tail)
   951  		}
   952  	}
   953  	if !unpredictable {
   954  		var served uint64
   955  		for _, peer := range peers {
   956  			served += peer.served.Load()
   957  		}
   958  		if served != expected.serve {
   959  			return fmt.Errorf("served headers mismatch: have %d, want %d", served, expected.serve)
   960  		}
   961  		var drops uint64
   962  		for _, peer := range peers {
   963  			drops += peer.dropped.Load()
   964  		}
   965  		if drops != expected.drop {
   966  			return fmt.Errorf("dropped peers mismatch: have %d, want %d", drops, expected.drop)
   967  		}
   968  	}
   969  	return nil
   970  }