github.com/theQRL/go-zond@v0.2.1/zond/downloader/skeleton_test.go (about)

     1  // Copyright 2022 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  // TODO(now.youtrack.cloud/issue/TGZ-8)
    20  /*
    21  import (
    22  	"encoding/json"
    23  	"errors"
    24  	"fmt"
    25  	"math/big"
    26  	"sync/atomic"
    27  	"testing"
    28  
    29  	"github.com/theQRL/go-zond/common"
    30  	"github.com/theQRL/go-zond/core/rawdb"
    31  	"github.com/theQRL/go-zond/core/types"
    32  	"github.com/theQRL/go-zond/log"
    33  	"github.com/theQRL/go-zond/zond/protocols/zond"
    34  	"github.com/theQRL/go-zond/zonddb"
    35  )
    36  
    37  // hookedBackfiller is a tester backfiller with all interface methods mocked and
    38  // hooked so tests can implement only the things they need.
    39  type hookedBackfiller struct {
    40  	// suspendHook is an optional hook to be called when the filler is requested
    41  	// to be suspended.
    42  	suspendHook func() *types.Header
    43  
    44  	// resumeHook is an optional hook to be called when the filler is requested
    45  	// to be resumed.
    46  	resumeHook func()
    47  }
    48  
    49  // newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
    50  // essentially acting as a noop.
    51  func newHookedBackfiller() backfiller {
    52  	return new(hookedBackfiller)
    53  }
    54  
    55  // suspend requests the backfiller to abort any running full or snap sync
    56  // based on the skeleton chain as it might be invalid. The backfiller should
    57  // gracefully handle multiple consecutive suspends without a resume, even
    58  // on initial startup.
    59  func (hf *hookedBackfiller) suspend() *types.Header {
    60  	if hf.suspendHook != nil {
    61  		return hf.suspendHook()
    62  	}
    63  	return nil // we don't really care about header cleanups for now
    64  }
    65  
    66  // resume requests the backfiller to start running fill or snap sync based on
    67  // the skeleton chain as it has successfully been linked. Appending new heads
    68  // to the end of the chain will not result in suspend/resume cycles.
    69  func (hf *hookedBackfiller) resume() {
    70  	if hf.resumeHook != nil {
    71  		hf.resumeHook()
    72  	}
    73  }
    74  
    75  // skeletonTestPeer is a mock peer that can only serve header requests from a
     76  // pre-prepared header chain (which may be arbitrarily wrong for testing).
    77  //
    78  // Requesting anything else from these peers will hard panic. Note, do *not*
    79  // implement any other methods. We actually want to make sure that the skeleton
     80  // syncer only depends on - and will only ever depend on - header requests.
    81  type skeletonTestPeer struct {
    82  	id      string          // Unique identifier of the mock peer
    83  	headers []*types.Header // Headers to serve when requested
    84  
    85  	serve func(origin uint64) []*types.Header // Hook to allow custom responses
    86  
    87  	served  atomic.Uint64 // Number of headers served by this peer
    88  	dropped atomic.Uint64 // Flag whether the peer was dropped (stop responding)
    89  }
    90  
    91  // newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
    92  func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
    93  	return &skeletonTestPeer{
    94  		id:      id,
    95  		headers: headers,
    96  	}
    97  }
    98  
    99  // newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync with,
   100  // and sets an optional serve hook that can return headers for delivery instead
   101  // of the predefined chain. Useful for emulating malicious behavior that would
   102  // otherwise require dedicated peer types.
   103  func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
   104  	return &skeletonTestPeer{
   105  		id:      id,
   106  		headers: headers,
   107  		serve:   serve,
   108  	}
   109  }
   110  
   111  // RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
   112  // origin; associated with a particular peer in the download tester. The returned
   113  // function can be used to retrieve batches of headers from the particular peer.
   114  func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *zond.Response) (*zond.Request, error) {
    115  	// Since skeleton test peers are in-memory mocks, dropping them does not make
   116  	// them inaccessible. As such, check a local `dropped` field to see if the
   117  	// peer has been dropped and should not respond any more.
   118  	if p.dropped.Load() != 0 {
   119  		return nil, errors.New("peer already dropped")
   120  	}
   121  	// Skeleton sync retrieves batches of headers going backward without gaps.
   122  	// This ensures we can follow a clean parent progression without any reorg
   123  	// hiccups. There is no need for any other type of header retrieval, so do
   124  	// panic if there's such a request.
   125  	if !reverse || skip != 0 {
   126  		// Note, if other clients want to do these kinds of requests, it's their
   127  		// problem, it will still work. We just don't want *us* making complicated
   128  		// requests without a very strong reason to.
   129  		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
   130  	}
   131  	// If the skeleton syncer requests the genesis block, panic. Whilst it could
   132  	// be considered a valid request, our code specifically should not request it
   133  	// ever since we want to link up headers to an existing local chain, which at
    134  	// worst will be the genesis.
   135  	if int64(origin)-int64(amount) < 0 {
   136  		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
   137  	}
   138  	// To make concurrency easier, the skeleton syncer always requests fixed size
   139  	// batches of headers. Panic if the peer is requested an amount other than the
   140  	// configured batch size (apart from the request leading to the genesis).
   141  	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
   142  		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
   143  	}
   144  	// Simple reverse header retrieval. Fill from the peer's chain and return.
   145  	// If the tester has a serve hook set, try to use that before falling back
   146  	// to the default behavior.
   147  	var headers []*types.Header
   148  	if p.serve != nil {
   149  		headers = p.serve(origin)
   150  	}
   151  	if headers == nil {
   152  		headers = make([]*types.Header, 0, amount)
   153  		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
   154  			for i := 0; i < amount; i++ {
   155  				// Consider nil headers as a form of attack and withhold them. Nil
   156  				// cannot be decoded from RLP, so it's not possible to produce an
   157  				// attack by sending/receiving those over zond.
   158  				header := p.headers[int(origin)-i]
   159  				if header == nil {
   160  					continue
   161  				}
   162  				headers = append(headers, header)
   163  			}
   164  		}
   165  	}
   166  	p.served.Add(uint64(len(headers)))
   167  
   168  	hashes := make([]common.Hash, len(headers))
   169  	for i, header := range headers {
   170  		hashes[i] = header.Hash()
   171  	}
   172  	// Deliver the headers to the downloader
   173  	req := &zond.Request{
   174  		Peer: p.id,
   175  	}
   176  	res := &zond.Response{
   177  		Req:  req,
   178  		Res:  (*zond.BlockHeadersRequest)(&headers),
   179  		Meta: hashes,
   180  		Time: 1,
   181  		Done: make(chan error),
   182  	}
   183  	go func() {
   184  		sink <- res
   185  		if err := <-res.Done; err != nil {
   186  			log.Warn("Skeleton test peer response rejected", "err", err)
   187  			p.dropped.Add(1)
   188  		}
   189  	}()
   190  	return req, nil
   191  }
   192  
   193  func (p *skeletonTestPeer) Head() common.Hash {
   194  	panic("skeleton sync must not request the remote head")
   195  }
   196  
   197  func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *zond.Response) (*zond.Request, error) {
   198  	panic("skeleton sync must not request headers by hash")
   199  }
   200  
   201  func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *zond.Response) (*zond.Request, error) {
   202  	panic("skeleton sync must not request block bodies")
   203  }
   204  
   205  func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *zond.Response) (*zond.Request, error) {
   206  	panic("skeleton sync must not request receipts")
   207  }
   208  
   209  // Tests various sync initializations based on previous leftovers in the database
   210  // and announced heads.
   211  func TestSkeletonSyncInit(t *testing.T) {
   212  	// Create a few key headers
   213  	var (
   214  		genesis = &types.Header{Number: big.NewInt(0)}
   215  		// block49 = &types.Header{Number: big.NewInt(49)}
   216  		// block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
   217  		// block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
   218  	)
   219  	tests := []struct {
   220  		headers  []*types.Header // Database content (beside the genesis)
   221  		oldstate []*subchain     // Old sync state with various interrupted subchains
   222  		head     *types.Header   // New head header to announce to reorg to
   223  		newstate []*subchain     // Expected sync state after the reorg
   224  	}{
   225  
   226  			// Completely empty database with only the genesis set. The sync is expected
   227  			// to create a single subchain with the requested head.
   228  			{
   229  				head:     block50,
   230  				newstate: []*subchain{{Head: 50, Tail: 50}},
   231  			},
   232  			// Empty database with only the genesis set with a leftover empty sync
   233  			// progress. This is a synthetic case, just for the sake of covering things.
   234  			{
   235  				oldstate: []*subchain{},
   236  				head:     block50,
   237  				newstate: []*subchain{{Head: 50, Tail: 50}},
   238  			},
   239  			// A single leftover subchain is present, older than the new head. The
   240  			// old subchain should be left as is and a new one appended to the sync
   241  			// status.
   242  			{
   243  				oldstate: []*subchain{{Head: 10, Tail: 5}},
   244  				head:     block50,
   245  				newstate: []*subchain{
   246  					{Head: 50, Tail: 50},
   247  					{Head: 10, Tail: 5},
   248  				},
   249  			},
   250  			// Multiple leftover subchains are present, older than the new head. The
   251  			// old subchains should be left as is and a new one appended to the sync
   252  			// status.
   253  			{
   254  				oldstate: []*subchain{
   255  					{Head: 20, Tail: 15},
   256  					{Head: 10, Tail: 5},
   257  				},
   258  				head: block50,
   259  				newstate: []*subchain{
   260  					{Head: 50, Tail: 50},
   261  					{Head: 20, Tail: 15},
   262  					{Head: 10, Tail: 5},
   263  				},
   264  			},
   265  			// A single leftover subchain is present, newer than the new head. The
   266  			// newer subchain should be deleted and a fresh one created for the head.
   267  			{
   268  				oldstate: []*subchain{{Head: 65, Tail: 60}},
   269  				head:     block50,
   270  				newstate: []*subchain{{Head: 50, Tail: 50}},
   271  			},
   272  			// Multiple leftover subchain is present, newer than the new head. The
   273  			// newer subchains should be deleted and a fresh one created for the head.
   274  			{
   275  				oldstate: []*subchain{
   276  					{Head: 75, Tail: 70},
   277  					{Head: 65, Tail: 60},
   278  				},
   279  				head:     block50,
   280  				newstate: []*subchain{{Head: 50, Tail: 50}},
   281  			},
   282  
   283  			// Two leftover subchains are present, one fully older and one fully
   284  			// newer than the announced head. The head should delete the newer one,
   285  			// keeping the older one.
   286  			{
   287  				oldstate: []*subchain{
   288  					{Head: 65, Tail: 60},
   289  					{Head: 10, Tail: 5},
   290  				},
   291  				head: block50,
   292  				newstate: []*subchain{
   293  					{Head: 50, Tail: 50},
   294  					{Head: 10, Tail: 5},
   295  				},
   296  			},
   297  			// Multiple leftover subchains are present, some fully older and some
   298  			// fully newer than the announced head. The head should delete the newer
   299  			// ones, keeping the older ones.
   300  			{
   301  				oldstate: []*subchain{
   302  					{Head: 75, Tail: 70},
   303  					{Head: 65, Tail: 60},
   304  					{Head: 20, Tail: 15},
   305  					{Head: 10, Tail: 5},
   306  				},
   307  				head: block50,
   308  				newstate: []*subchain{
   309  					{Head: 50, Tail: 50},
   310  					{Head: 20, Tail: 15},
   311  					{Head: 10, Tail: 5},
   312  				},
   313  			},
   314  			// A single leftover subchain is present and the new head is extending
   315  			// it with one more header. We expect the subchain head to be pushed
   316  			// forward.
   317  			{
   318  				headers:  []*types.Header{block49},
   319  				oldstate: []*subchain{{Head: 49, Tail: 5}},
   320  				head:     block50,
   321  				newstate: []*subchain{{Head: 50, Tail: 5}},
   322  			},
   323  			// A single leftover subchain is present and although the new head does
   324  			// extend it number wise, the hash chain does not link up. We expect a
   325  			// new subchain to be created for the dangling head.
   326  			{
   327  				headers:  []*types.Header{block49B},
   328  				oldstate: []*subchain{{Head: 49, Tail: 5}},
   329  				head:     block50,
   330  				newstate: []*subchain{
   331  					{Head: 50, Tail: 50},
   332  					{Head: 49, Tail: 5},
   333  				},
   334  			},
   335  			// A single leftover subchain is present. A new head is announced that
   336  			// links into the middle of it, correctly anchoring into an existing
   337  			// header. We expect the old subchain to be truncated and extended with
   338  			// the new head.
   339  			{
   340  				headers:  []*types.Header{block49},
   341  				oldstate: []*subchain{{Head: 100, Tail: 5}},
   342  				head:     block50,
   343  				newstate: []*subchain{{Head: 50, Tail: 5}},
   344  			},
   345  			// A single leftover subchain is present. A new head is announced that
   346  			// links into the middle of it, but does not anchor into an existing
   347  			// header. We expect the old subchain to be truncated and a new chain
   348  			// be created for the dangling head.
   349  			{
   350  				headers:  []*types.Header{block49B},
   351  				oldstate: []*subchain{{Head: 100, Tail: 5}},
   352  				head:     block50,
   353  				newstate: []*subchain{
   354  					{Head: 50, Tail: 50},
   355  					{Head: 49, Tail: 5},
   356  				},
   357  			},
   358  	}
   359  	for i, tt := range tests {
   360  		// Create a fresh database and initialize it with the starting state
   361  		db := rawdb.NewMemoryDatabase()
   362  
   363  		rawdb.WriteHeader(db, genesis)
   364  		for _, header := range tt.headers {
   365  			rawdb.WriteSkeletonHeader(db, header)
   366  		}
   367  		if tt.oldstate != nil {
   368  			blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
   369  			rawdb.WriteSkeletonSyncStatus(db, blob)
   370  		}
   371  		// Create a skeleton sync and run a cycle
   372  		wait := make(chan struct{})
   373  
   374  		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
   375  		skeleton.syncStarting = func() { close(wait) }
   376  		skeleton.Sync(tt.head, nil, true)
   377  
   378  		<-wait
   379  		skeleton.Terminate()
   380  
   381  		// Ensure the correct resulting sync status
   382  		expect := skeletonExpect{state: tt.newstate}
   383  		if err := checkSkeletonProgress(db, false, nil, expect); err != nil {
   384  			t.Errorf("test %d: %v", i, err)
   385  		}
   386  	}
   387  }
   388  
   389  // Tests that a running skeleton sync can be extended with properly linked up
   390  // headers but not with side chains.
   391  func TestSkeletonSyncExtend(t *testing.T) {
   392  	// Create a few key headers
   393  	var (
   394  		genesis = &types.Header{Number: big.NewInt(0)}
   395  		block49 = &types.Header{Number: big.NewInt(49)}
   396  		// block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
   397  		block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
   398  		// block51  = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
   399  	)
   400  	tests := []struct {
   401  		head     *types.Header // New head header to announce to reorg to
   402  		extend   *types.Header // New head header to announce to extend with
   403  		newstate []*subchain   // Expected sync state after the reorg
   404  		err      error         // Whether extension succeeds or not
   405  	}{
   406  
   407  		// Initialize a sync and try to extend it with a subsequent block.
   408  		{
   409  			head:   block49,
   410  			extend: block50,
   411  			newstate: []*subchain{
   412  				{Head: 50, Tail: 49},
   413  			},
   414  		},
   415  			// Initialize a sync and try to extend it with the existing head block.
   416  			{
   417  				head:   block49,
   418  				extend: block49,
   419  				newstate: []*subchain{
   420  					{Head: 49, Tail: 49},
   421  				},
   422  			},
   423  			// Initialize a sync and try to extend it with a sibling block.
   424  			{
   425  				head:   block49,
   426  				extend: block49B,
   427  				newstate: []*subchain{
   428  					{Head: 49, Tail: 49},
   429  				},
   430  				err: errChainReorged,
   431  			},
   432  			// Initialize a sync and try to extend it with a number-wise sequential
   433  			// header, but a hash wise non-linking one.
   434  			{
   435  				head:   block49B,
   436  				extend: block50,
   437  				newstate: []*subchain{
   438  					{Head: 49, Tail: 49},
   439  				},
   440  				err: errChainForked,
   441  			},
   442  			// Initialize a sync and try to extend it with a non-linking future block.
   443  			{
   444  				head:   block49,
   445  				extend: block51,
   446  				newstate: []*subchain{
   447  					{Head: 49, Tail: 49},
   448  				},
   449  				err: errChainGapped,
   450  			},
   451  			// Initialize a sync and try to extend it with a past canonical block.
   452  			{
   453  				head:   block50,
   454  				extend: block49,
   455  				newstate: []*subchain{
   456  					{Head: 50, Tail: 50},
   457  				},
   458  				err: errChainReorged,
   459  			},
   460  			/*
   461  			// Initialize a sync and try to extend it with a past sidechain block.
   462  			{
   463  				head:   block50,
   464  				extend: block49B,
   465  				newstate: []*subchain{
   466  					{Head: 50, Tail: 50},
   467  				},
   468  				err: errChainReorged,
   469  			},
   470  	}
   471  	for i, tt := range tests {
   472  		// Create a fresh database and initialize it with the starting state
   473  		db := rawdb.NewMemoryDatabase()
   474  		rawdb.WriteHeader(db, genesis)
   475  
   476  		// Create a skeleton sync and run a cycle
   477  		wait := make(chan struct{})
   478  
   479  		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
   480  		skeleton.syncStarting = func() { close(wait) }
   481  		skeleton.Sync(tt.head, nil, true)
   482  
   483  		<-wait
   484  		if err := skeleton.Sync(tt.extend, nil, false); !errors.Is(err, tt.err) {
   485  			t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
   486  		}
   487  		skeleton.Terminate()
   488  
   489  		// Ensure the correct resulting sync status
   490  		expect := skeletonExpect{state: tt.newstate}
   491  		if err := checkSkeletonProgress(db, false, nil, expect); err != nil {
   492  			t.Errorf("test %d: %v", i, err)
   493  		}
   494  	}
   495  }
   496  
   497  type skeletonExpect struct {
   498  	state []*subchain // Expected sync state after the post-init event
   499  	serve uint64      // Expected number of header retrievals after initial cycle
   500  	drop  uint64      // Expected number of peers dropped after initial cycle
   501  }
   502  
   503  type skeletonTest struct {
   504  	fill          bool // Whether to run a real backfiller in this test case
   505  	unpredictable bool // Whether to ignore drops/serves due to uncertain packet assignments
   506  
   507  	head  *types.Header       // New head header to announce to reorg to
   508  	peers []*skeletonTestPeer // Initial peer set to start the sync with
   509  	mid   skeletonExpect
   510  
   511  	newHead *types.Header     // New header to anoint on top of the old one
   512  	newPeer *skeletonTestPeer // New peer to join the skeleton syncer
   513  	end     skeletonExpect
   514  }
   515  
   516  // NOTE(rgeraldes24): tests not valid
   517  // Tests that the skeleton sync correctly retrieves headers from one or more
   518  // peers without duplicates or other strange side effects.
   519  func TestSkeletonSyncRetrievals(t *testing.T) {
   520  	//log.SetDefault(log.NewLogger(log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))))
   521  
   522  	// Since skeleton headers don't need to be meaningful, beyond a parent hash
   523  	// progression, create a long fake chain to test with.
   524  	chain := []*types.Header{{Number: big.NewInt(0)}}
   525  	for i := 1; i < 10000; i++ {
   526  		chain = append(chain, &types.Header{
   527  			ParentHash: chain[i-1].Hash(),
   528  			Number:     big.NewInt(int64(i)),
   529  		})
   530  	}
   531  	// Some tests require a forking side chain to trigger cornercases.
   532  	var sidechain []*types.Header
   533  	for i := 0; i < len(chain)/2; i++ { // Fork at block #5000
   534  		sidechain = append(sidechain, chain[i])
   535  	}
   536  	for i := len(chain) / 2; i < len(chain); i++ {
   537  		sidechain = append(sidechain, &types.Header{
   538  			ParentHash: sidechain[i-1].Hash(),
   539  			Number:     big.NewInt(int64(i)),
   540  			Extra:      []byte("B"), // force a different hash
   541  		})
   542  	}
   543  	tests := []skeletonTest{
   544  		// Completely empty database with only the genesis set. The sync is expected
   545  		// to create a single subchain with the requested head. No peers however, so
   546  		// the sync should be stuck without any progression.
   547  		//
   548  		// When a new peer is added, it should detect the join and fill the headers
   549  		// to the genesis block.
   550  		{
   551  			head: chain[len(chain)-1],
   552  			mid: skeletonExpect{
   553  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},
   554  			},
   555  
   556  			newPeer: newSkeletonTestPeer("test-peer", chain),
   557  			end: skeletonExpect{
   558  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   559  				serve: uint64(len(chain) - 2), // len - head - genesis
   560  			},
   561  		},
   562  
   563  		// Completely empty database with only the genesis set. The sync is expected
   564  		// to create a single subchain with the requested head. With one valid peer,
   565  		// the sync is expected to complete already in the initial round.
   566  		//
   567  		// Adding a second peer should not have any effect.
   568  		{
   569  			head:  chain[len(chain)-1],
   570  			peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
   571  			mid: skeletonExpect{
   572  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   573  				serve: uint64(len(chain) - 2), // len - head - genesis
   574  			},
   575  
   576  			newPeer: newSkeletonTestPeer("test-peer-2", chain),
   577  			end: skeletonExpect{
   578  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   579  				serve: uint64(len(chain) - 2), // len - head - genesis
   580  			},
   581  		},
   582  		// Completely empty database with only the genesis set. The sync is expected
   583  		// to create a single subchain with the requested head. With many valid peers,
   584  		// the sync is expected to complete already in the initial round.
   585  		//
   586  		// Adding a new peer should not have any effect.
   587  		{
   588  			head: chain[len(chain)-1],
   589  			peers: []*skeletonTestPeer{
   590  				newSkeletonTestPeer("test-peer-1", chain),
   591  				newSkeletonTestPeer("test-peer-2", chain),
   592  				newSkeletonTestPeer("test-peer-3", chain),
   593  			},
   594  			mid: skeletonExpect{
   595  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   596  				serve: uint64(len(chain) - 2), // len - head - genesis
   597  			},
   598  
   599  			newPeer: newSkeletonTestPeer("test-peer-4", chain),
   600  			end: skeletonExpect{
   601  				state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   602  				serve: uint64(len(chain) - 2), // len - head - genesis
   603  			},
   604  		},
   605  		// This test checks if a peer tries to withhold a header - *on* the sync
   606  		// boundary - instead of sending the requested amount. The malicious short
   607  		// package should not be accepted.
   608  		//
   609  		// Joining with a new peer should however unblock the sync.
   610  		{
   611  			head: chain[requestHeaders+100],
   612  			peers: []*skeletonTestPeer{
   613  				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
   614  			},
   615  			mid: skeletonExpect{
   616  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   617  				serve: requestHeaders + 101 - 3, // len - head - genesis - missing
   618  				drop:  1,                        // penalize shortened header deliveries
   619  			},
   620  
   621  			newPeer: newSkeletonTestPeer("good-peer", chain),
   622  			end: skeletonExpect{
   623  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   624  				serve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
   625  				drop:  1,                                      // no new drops
   626  			},
   627  		},
   628  		// This test checks if a peer tries to withhold a header - *off* the sync
   629  		// boundary - instead of sending the requested amount. The malicious short
   630  		// package should not be accepted.
   631  		//
   632  		// Joining with a new peer should however unblock the sync.
   633  		{
   634  			head: chain[requestHeaders+100],
   635  			peers: []*skeletonTestPeer{
   636  				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
   637  			},
   638  			mid: skeletonExpect{
   639  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   640  				serve: requestHeaders + 101 - 3, // len - head - genesis - missing
   641  				drop:  1,                        // penalize shortened header deliveries
   642  			},
   643  
   644  			newPeer: newSkeletonTestPeer("good-peer", chain),
   645  			end: skeletonExpect{
   646  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   647  				serve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
   648  				drop:  1,                                      // no new drops
   649  			},
   650  		},
   651  		// This test checks if a peer tries to duplicate a header - *on* the sync
   652  		// boundary - instead of sending the correct sequence. The malicious duped
   653  		// package should not be accepted.
   654  		//
   655  		// Joining with a new peer should however unblock the sync.
   656  		{
   657  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   658  			peers: []*skeletonTestPeer{
   659  				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
   660  			},
   661  			mid: skeletonExpect{
   662  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   663  				serve: requestHeaders + 101 - 2, // len - head - genesis
   664  				drop:  1,                        // penalize invalid header sequences
   665  			},
   666  
   667  			newPeer: newSkeletonTestPeer("good-peer", chain),
   668  			end: skeletonExpect{
   669  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   670  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   671  				drop:  1,                                      // no new drops
   672  			},
   673  		},
   674  		// This test checks if a peer tries to duplicate a header - *off* the sync
   675  		// boundary - instead of sending the correct sequence. The malicious duped
   676  		// package should not be accepted.
   677  		//
   678  		// Joining with a new peer should however unblock the sync.
   679  		{
   680  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   681  			peers: []*skeletonTestPeer{
   682  				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
   683  			},
   684  			mid: skeletonExpect{
   685  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   686  				serve: requestHeaders + 101 - 2, // len - head - genesis
   687  				drop:  1,                        // penalize invalid header sequences
   688  			},
   689  
   690  			newPeer: newSkeletonTestPeer("good-peer", chain),
   691  			end: skeletonExpect{
   692  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   693  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   694  				drop:  1,                                      // no new drops
   695  			},
   696  		},
   697  		// This test checks if a peer tries to inject a different header - *on*
   698  		// the sync boundary - instead of sending the correct sequence. The bad
   699  		// package should not be accepted.
   700  		//
   701  		// Joining with a new peer should however unblock the sync.
   702  		{
   703  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   704  			peers: []*skeletonTestPeer{
   705  				newSkeletonTestPeer("header-changer",
   706  					append(
   707  						append(
   708  							append([]*types.Header{}, chain[:99]...),
   709  							&types.Header{
   710  								ParentHash: chain[98].Hash(),
   711  								Number:     big.NewInt(int64(99)),
   712  								GasLimit:   1,
   713  							},
   714  						), chain[100:]...,
   715  					),
   716  				),
   717  			},
   718  			mid: skeletonExpect{
   719  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   720  				serve: requestHeaders + 101 - 2, // len - head - genesis
   721  				drop:  1,                        // different set of headers, drop // TODO(karalabe): maybe just diff sync?
   722  			},
   723  
   724  			newPeer: newSkeletonTestPeer("good-peer", chain),
   725  			end: skeletonExpect{
   726  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   727  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   728  				drop:  1,                                      // no new drops
   729  			},
   730  		},
   731  		// This test checks if a peer tries to inject a different header - *off*
   732  		// the sync boundary - instead of sending the correct sequence. The bad
   733  		// package should not be accepted.
   734  		//
   735  		// Joining with a new peer should however unblock the sync.
   736  		{
   737  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   738  			peers: []*skeletonTestPeer{
   739  				newSkeletonTestPeer("header-changer",
   740  					append(
   741  						append(
   742  							append([]*types.Header{}, chain[:50]...),
   743  							&types.Header{
   744  								ParentHash: chain[49].Hash(),
   745  								Number:     big.NewInt(int64(50)),
   746  								GasLimit:   1,
   747  							},
   748  						), chain[51:]...,
   749  					),
   750  				),
   751  			},
   752  			mid: skeletonExpect{
   753  				state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   754  				serve: requestHeaders + 101 - 2, // len - head - genesis
   755  				drop:  1,                        // different set of headers, drop
   756  			},
   757  
   758  			newPeer: newSkeletonTestPeer("good-peer", chain),
   759  			end: skeletonExpect{
   760  				state: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   761  				serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   762  				drop:  1,                                      // no new drops
   763  			},
   764  		},
   765  		// This test reproduces a bug caught during review (kudos to @holiman)
   766  		// where a subchain is merged with a previously interrupted one, causing
   767  		// pending data in the scratch space to become "invalid" (since we jump
   768  		// ahead during subchain merge). In that case it is expected to ignore
   769  		// the queued up data instead of trying to process on top of a shifted
   770  		// task set.
   771  		//
   772  		// The test is a bit convoluted since it needs to trigger a concurrency
   773  		// issue. First we sync up an initial chain of 2x512 items. Then announce
   774  		// 2x512+2 as head and delay delivering the head batch to fill the scratch
   775  		// space first. The delivery head should merge with the previous download
   776  		// and the scratch space must not be consumed further.
   777  		{
   778  			head: chain[2*requestHeaders],
   779  			peers: []*skeletonTestPeer{
   780  				newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
   781  					if origin == chain[2*requestHeaders+1].Number.Uint64() {
   782  						time.Sleep(100 * time.Millisecond)
   783  					}
   784  					return nil // Fallback to default behavior, just delayed
   785  				}),
   786  				newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
   787  					if origin == chain[2*requestHeaders+1].Number.Uint64() {
   788  						time.Sleep(100 * time.Millisecond)
   789  					}
   790  					return nil // Fallback to default behavior, just delayed
   791  				}),
   792  			},
   793  			mid: skeletonExpect{
   794  				state: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
   795  				serve: 2*requestHeaders - 1, // len - head - genesis
   796  			},
   797  
   798  			newHead: chain[2*requestHeaders+2],
   799  			end: skeletonExpect{
   800  				state: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
   801  				serve: 4 * requestHeaders,
   802  			},
   803  		},
   804  		// This test reproduces a bug caught by (@rjl493456442) where a skeleton
   805  		// header goes missing, causing the sync to get stuck and/or panic.
   806  		//
   807  		// The setup requires a previously successfully synced chain up to a block
		// height N. That results in a single skeleton header (block N) and a single
   809  		// subchain (head N, Tail N) being stored on disk.
   810  		//
   811  		// The following step requires a new sync cycle to a new side chain of a
   812  		// height higher than N, and an ancestor lower than N (e.g. N-2, N+2).
   813  		// In this scenario, when processing a batch of headers, a link point of
   814  		// N-2 will be found, meaning that N-1 and N have been overwritten.
   815  		//
   816  		// The link event triggers an early exit, noticing that the previous sub-
		// chain is a leftover and deletes it (with its skeleton header N). But
   818  		// since skeleton header N has been overwritten to the new side chain, we
   819  		// end up losing it and creating a gap.
   820  		{
   821  			fill:          true,
   822  			unpredictable: true, // We have good and bad peer too, bad may be dropped, test too short for certainty
   823  
   824  			head:  chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2
   825  			peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)},
   826  			mid: skeletonExpect{
   827  				state: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}},
   828  			},
   829  
   830  			newHead: sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4
   831  			newPeer: newSkeletonTestPeer("test-peer-newchain", sidechain),
   832  			end: skeletonExpect{
   833  				state: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}},
   834  			},
   835  		},
   836  	}
   837  	for i, tt := range tests {
   838  		// Create a fresh database and initialize it with the starting state
   839  		db := rawdb.NewMemoryDatabase()
   840  
   841  		rawdb.WriteBlock(db, types.NewBlockWithHeader(chain[0]))
   842  		rawdb.WriteReceipts(db, chain[0].Hash(), chain[0].Number.Uint64(), types.Receipts{})
   843  
   844  		// Create a peer set to feed headers through
   845  		peerset := newPeerSet()
   846  		for _, peer := range tt.peers {
   847  			peerset.Register(newPeerConnection(peer.id, zond.ETH68, peer, log.New("id", peer.id)))
   848  		}
   849  		// Create a peer dropper to track malicious peers
   850  		dropped := make(map[string]int)
   851  		drop := func(peer string) {
   852  			if p := peerset.Peer(peer); p != nil {
   853  				p.peer.(*skeletonTestPeer).dropped.Add(1)
   854  			}
   855  			peerset.Unregister(peer)
   856  			dropped[peer]++
   857  		}
   858  		// Create a backfiller if we need to run more advanced tests
   859  		filler := newHookedBackfiller()
   860  		if tt.fill {
   861  			var filled *types.Header
   862  
   863  			filler = &hookedBackfiller{
   864  				resumeHook: func() {
   865  					var progress skeletonProgress
   866  					json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
   867  
   868  					for progress.Subchains[0].Tail < progress.Subchains[0].Head {
   869  						header := rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)
   870  
   871  						rawdb.WriteBlock(db, types.NewBlockWithHeader(header))
   872  						rawdb.WriteReceipts(db, header.Hash(), header.Number.Uint64(), types.Receipts{})
   873  
   874  						rawdb.DeleteSkeletonHeader(db, header.Number.Uint64())
   875  
   876  						progress.Subchains[0].Tail++
   877  						progress.Subchains[0].Next = header.Hash()
   878  					}
   879  					filled = rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)
   880  
   881  					rawdb.WriteBlock(db, types.NewBlockWithHeader(filled))
   882  					rawdb.WriteReceipts(db, filled.Hash(), filled.Number.Uint64(), types.Receipts{})
   883  				},
   884  
   885  				suspendHook: func() *types.Header {
   886  					prev := filled
   887  					filled = nil
   888  
   889  					return prev
   890  				},
   891  			}
   892  		}
   893  		// Create a skeleton sync and run a cycle
   894  		skeleton := newSkeleton(db, peerset, drop, filler)
   895  		skeleton.Sync(tt.head, nil, true)
   896  
   897  		// Wait a bit (bleah) for the initial sync loop to go to idle. This might
   898  		// be either a finish or a never-start hence why there's no event to hook.
   899  		waitStart := time.Now()
   900  		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
   901  			time.Sleep(waitTime)
   902  			if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err == nil {
   903  				break
   904  			}
   905  		}
   906  		if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err != nil {
   907  			t.Errorf("test %d, mid: %v", i, err)
   908  			continue
   909  		}
   910  
   911  		// Apply the post-init events if there's any
   912  		endpeers := tt.peers
   913  		if tt.newPeer != nil {
   914  			if err := peerset.Register(newPeerConnection(tt.newPeer.id, zond.ETH68, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
   915  				t.Errorf("test %d: failed to register new peer: %v", i, err)
   916  			}
   917  			time.Sleep(time.Millisecond * 50) // given time for peer registration
   918  			endpeers = append(tt.peers, tt.newPeer)
   919  		}
   920  		if tt.newHead != nil {
   921  			skeleton.Sync(tt.newHead, nil, true)
   922  		}
   923  
   924  		// Wait a bit (bleah) for the second sync loop to go to idle. This might
   925  		// be either a finish or a never-start hence why there's no event to hook.
   926  		waitStart = time.Now()
   927  		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
   928  			time.Sleep(waitTime)
   929  			if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err == nil {
   930  				break
   931  			}
   932  		}
   933  		if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err != nil {
   934  			t.Errorf("test %d, end: %v", i, err)
   935  			continue
   936  		}
   937  		// Check that the peers served no more headers than we actually needed
   938  		// Clean up any leftover skeleton sync resources
   939  		skeleton.Terminate()
   940  	}
   941  }
   942  
   943  func checkSkeletonProgress(db zonddb.KeyValueReader, unpredictable bool, peers []*skeletonTestPeer, expected skeletonExpect) error {
   944  	var progress skeletonProgress
   945  	// Check the post-init end state if it matches the required results
   946  	json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
   947  
   948  	if len(progress.Subchains) != len(expected.state) {
   949  		return fmt.Errorf("subchain count mismatch: have %d, want %d", len(progress.Subchains), len(expected.state))
   950  	}
   951  	for j := 0; j < len(progress.Subchains); j++ {
   952  		if progress.Subchains[j].Head != expected.state[j].Head {
   953  			return fmt.Errorf("subchain %d head mismatch: have %d, want %d", j, progress.Subchains[j].Head, expected.state[j].Head)
   954  		}
   955  		if progress.Subchains[j].Tail != expected.state[j].Tail {
   956  			return fmt.Errorf("subchain %d tail mismatch: have %d, want %d", j, progress.Subchains[j].Tail, expected.state[j].Tail)
   957  		}
   958  	}
   959  	if !unpredictable {
   960  		var served uint64
   961  		for _, peer := range peers {
   962  			served += peer.served.Load()
   963  		}
   964  		if served != expected.serve {
   965  			return fmt.Errorf("served headers mismatch: have %d, want %d", served, expected.serve)
   966  		}
   967  		var drops uint64
   968  		for _, peer := range peers {
   969  			drops += peer.dropped.Load()
   970  		}
   971  		if drops != expected.drop {
   972  			return fmt.Errorf("dropped peers mismatch: have %d, want %d", drops, expected.drop)
   973  		}
   974  	}
   975  	return nil
   976  }
   977  */