github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/eth/downloader/skeleton_test.go (about)

     1  // Copyright 2022 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/tirogen/go-ethereum/common"
    29  	"github.com/tirogen/go-ethereum/core/rawdb"
    30  	"github.com/tirogen/go-ethereum/core/types"
    31  	"github.com/tirogen/go-ethereum/eth/protocols/eth"
    32  	"github.com/tirogen/go-ethereum/log"
    33  )
    34  
// hookedBackfiller is a tester backfiller with all interface methods mocked and
// hooked so tests can implement only the things they need.
//
// The zero value is a usable no-op backfiller: both hooks are optional.
type hookedBackfiller struct {
	// suspendHook is an optional hook to be called when the filler is requested
	// to be suspended.
	suspendHook func()

	// resumeHook is an optional hook to be called when the filler is requested
	// to be resumed.
	resumeHook func()
}
    46  
    47  // newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
    48  // essentially acting as a noop.
    49  func newHookedBackfiller() backfiller {
    50  	return new(hookedBackfiller)
    51  }
    52  
    53  // suspend requests the backfiller to abort any running full or snap sync
    54  // based on the skeleton chain as it might be invalid. The backfiller should
    55  // gracefully handle multiple consecutive suspends without a resume, even
    56  // on initial startup.
    57  func (hf *hookedBackfiller) suspend() *types.Header {
    58  	if hf.suspendHook != nil {
    59  		hf.suspendHook()
    60  	}
    61  	return nil // we don't really care about header cleanups for now
    62  }
    63  
    64  // resume requests the backfiller to start running fill or snap sync based on
    65  // the skeleton chain as it has successfully been linked. Appending new heads
    66  // to the end of the chain will not result in suspend/resume cycles.
    67  func (hf *hookedBackfiller) resume() {
    68  	if hf.resumeHook != nil {
    69  		hf.resumeHook()
    70  	}
    71  }
    72  
// skeletonTestPeer is a mock peer that can only serve header requests from a
// pre-prepared header chain (which may be arbitrarily wrong for testing).
//
// Requesting anything else from these peers will hard panic. Note, do *not*
// implement any other methods. We actually want to make sure that the skeleton
// syncer only depends on - and will only ever do so - on header requests.
type skeletonTestPeer struct {
	id      string          // Unique identifier of the mock peer
	headers []*types.Header // Headers to serve when requested

	serve func(origin uint64) []*types.Header // Hook to allow custom responses

	served  uint64 // Number of headers served by this peer (updated atomically)
	dropped uint64 // Flag whether the peer was dropped; non-zero means stop responding (updated atomically)
}
    88  
    89  // newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
    90  func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
    91  	return &skeletonTestPeer{
    92  		id:      id,
    93  		headers: headers,
    94  	}
    95  }
    96  
    97  // newSkeletonTestPeer creates a new mock peer to test the skeleton sync with,
    98  // and sets an optional serve hook that can return headers for delivery instead
    99  // of the predefined chain. Useful for emulating malicious behavior that would
   100  // otherwise require dedicated peer types.
   101  func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
   102  	return &skeletonTestPeer{
   103  		id:      id,
   104  		headers: headers,
   105  		serve:   serve,
   106  	}
   107  }
   108  
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
//
// The response is delivered asynchronously on the sink channel; if the consumer
// rejects the delivery (non-nil error on Done), the peer marks itself dropped
// and refuses all further requests.
func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Since skeleton test peers are in-memory mocks, dropping them does not make
	// them inaccessible. As such, check a local `dropped` field to see if the
	// peer has been dropped and should not respond any more.
	if atomic.LoadUint64(&p.dropped) != 0 {
		return nil, errors.New("peer already dropped")
	}
	// Skeleton sync retrieves batches of headers going backward without gaps.
	// This ensures we can follow a clean parent progression without any reorg
	// hiccups. There is no need for any other type of header retrieval, so do
	// panic if there's such a request.
	if !reverse || skip != 0 {
		// Note, if other clients want to do these kinds of requests, it's their
		// problem, it will still work. We just don't want *us* making complicated
		// requests without a very strong reason to.
		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
	}
	// If the skeleton syncer requests the genesis block, panic. Whilst it could
	// be considered a valid request, our code specifically should not request it
	// ever since we want to link up headers to an existing local chain, which at
	// worse will be the genesis.
	if int64(origin)-int64(amount) < 0 {
		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
	}
	// To make concurrency easier, the skeleton syncer always requests fixed size
	// batches of headers. Panic if the peer is requested an amount other than the
	// configured batch size (apart from the request leading to the genesis).
	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
	}
	// Simple reverse header retrieval. Fill from the peer's chain and return.
	// If the tester has a serve hook set, try to use that before falling back
	// to the default behavior.
	var headers []*types.Header
	if p.serve != nil {
		headers = p.serve(origin)
	}
	if headers == nil {
		headers = make([]*types.Header, 0, amount)
		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
			for i := 0; i < amount; i++ {
				// Consider nil headers as a form of attack and withhold them. Nil
				// cannot be decoded from RLP, so it's not possible to produce an
				// attack by sending/receiving those over eth.
				header := p.headers[int(origin)-i]
				if header == nil {
					continue
				}
				headers = append(headers, header)
			}
		}
	}
	// Track the cumulative number of served headers so tests can assert on it.
	atomic.AddUint64(&p.served, uint64(len(headers)))

	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: p.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error),
	}
	// Deliver asynchronously so the caller can keep scheduling; a rejected
	// delivery flips the dropped flag, silencing this peer from then on.
	go func() {
		sink <- res
		if err := <-res.Done; err != nil {
			log.Warn("Skeleton test peer response rejected", "err", err)
			atomic.AddUint64(&p.dropped, 1)
		}
	}()
	return req, nil
}
   190  
// Head panics unconditionally: skeleton sync must never need the remote head,
// so any call here indicates a regression in the syncer.
func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
	panic("skeleton sync must not request the remote head")
}
   194  
// RequestHeadersByHash panics unconditionally: skeleton sync only retrieves
// headers by number, so a by-hash request indicates a regression in the syncer.
func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request headers by hash")
}
   198  
// RequestBodies panics unconditionally: skeleton sync deals in headers only,
// so a body request indicates a regression in the syncer.
func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request block bodies")
}
   202  
// RequestReceipts panics unconditionally: skeleton sync deals in headers only,
// so a receipt request indicates a regression in the syncer.
func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request receipts")
}
   206  
// Tests various sync initializations based on previous leftovers in the database
// and announced heads.
//
// Each fixture seeds a fresh memory database with optional skeleton headers and
// a serialized leftover sync progress, announces a new head, and asserts the
// resulting subchain set after one (peerless) sync startup.
func TestSkeletonSyncInit(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")} // sibling of block49 (different hash)
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
	)
	tests := []struct {
		headers  []*types.Header // Database content (beside the genesis)
		oldstate []*subchain     // Old sync state with various interrupted subchains
		head     *types.Header   // New head header to announce to reorg to
		newstate []*subchain     // Expected sync state after the reorg
	}{
		// Completely empty database with only the genesis set. The sync is expected
		// to create a single subchain with the requested head.
		{
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Empty database with only the genesis set with a leftover empty sync
		// progress. This is a synthetic case, just for the sake of covering things.
		{
			oldstate: []*subchain{},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// A single leftover subchain is present, older than the new head. The
		// old subchain should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{{Head: 10, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, older than the new head. The
		// old subchains should be left as is and a new one appended to the sync
		// status.
		{
			oldstate: []*subchain{
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present, newer than the new head. The
		// newer subchain should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{{Head: 65, Tail: 60}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},
		// Multiple leftover subchain is present, newer than the new head. The
		// newer subchains should be deleted and a fresh one created for the head.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
			},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 50}},
		},

		// Two leftover subchains are present, one fully older and one fully
		// newer than the announced head. The head should delete the newer one,
		// keeping the older one.
		{
			oldstate: []*subchain{
				{Head: 65, Tail: 60},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 10, Tail: 5},
			},
		},
		// Multiple leftover subchains are present, some fully older and some
		// fully newer than the announced head. The head should delete the newer
		// ones, keeping the older ones.
		{
			oldstate: []*subchain{
				{Head: 75, Tail: 70},
				{Head: 65, Tail: 60},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
			head: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 20, Tail: 15},
				{Head: 10, Tail: 5},
			},
		},
		// A single leftover subchain is present and the new head is extending
		// it with one more header. We expect the subchain head to be pushed
		// forward.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present and although the new head does
		// extend it number wise, the hash chain does not link up. We expect a
		// new subchain to be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 49, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, correctly anchoring into an existing
		// header. We expect the old subchain to be truncated and extended with
		// the new head.
		{
			headers:  []*types.Header{block49},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{{Head: 50, Tail: 5}},
		},
		// A single leftover subchain is present. A new head is announced that
		// links into the middle of it, but does not anchor into an existing
		// header. We expect the old subchain to be truncated and a new chain
		// be created for the dangling head.
		{
			headers:  []*types.Header{block49B},
			oldstate: []*subchain{{Head: 100, Tail: 5}},
			head:     block50,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
				{Head: 49, Tail: 5},
			},
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()

		rawdb.WriteHeader(db, genesis)
		for _, header := range tt.headers {
			rawdb.WriteSkeletonHeader(db, header)
		}
		if tt.oldstate != nil {
			blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
			rawdb.WriteSkeletonSyncStatus(db, blob)
		}
		// Create a skeleton sync and run a cycle; block on the syncStarting
		// hook so the startup reorg has completed before terminating.
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, true)

		<-wait
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		var progress skeletonProgress
		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)

		if len(progress.Subchains) != len(tt.newstate) {
			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
			continue
		}
		for j := 0; j < len(progress.Subchains); j++ {
			if progress.Subchains[j].Head != tt.newstate[j].Head {
				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
			}
			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
			}
		}
	}
}
   396  
// Tests that a running skeleton sync can be extended with properly linked up
// headers but not with side chains.
//
// Each fixture starts a sync at one head, then announces a second head with
// force=false and asserts both the returned error and the final subchain set.
func TestSkeletonSyncExtend(t *testing.T) {
	// Create a few key headers
	var (
		genesis  = &types.Header{Number: big.NewInt(0)}
		block49  = &types.Header{Number: big.NewInt(49)}
		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")} // sibling of block49 (different hash)
		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
		block51  = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
	)
	tests := []struct {
		head     *types.Header // New head header to announce to reorg to
		extend   *types.Header // New head header to announce to extend with
		newstate []*subchain   // Expected sync state after the reorg
		err      error         // Whether extension succeeds or not
	}{
		// Initialize a sync and try to extend it with a subsequent block.
		{
			head:   block49,
			extend: block50,
			newstate: []*subchain{
				{Head: 50, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with the existing head block.
		{
			head:   block49,
			extend: block49,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
		},
		// Initialize a sync and try to extend it with a sibling block.
		{
			head:   block49,
			extend: block49B,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a number-wise sequential
		// header, but a hash wise non-linking one.
		{
			head:   block49B,
			extend: block50,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a non-linking future block.
		{
			head:   block49,
			extend: block51,
			newstate: []*subchain{
				{Head: 49, Tail: 49},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a past canonical block.
		{
			head:   block50,
			extend: block49,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errReorgDenied,
		},
		// Initialize a sync and try to extend it with a past sidechain block.
		{
			head:   block50,
			extend: block49B,
			newstate: []*subchain{
				{Head: 50, Tail: 50},
			},
			err: errReorgDenied,
		},
	}
	for i, tt := range tests {
		// Create a fresh database and initialize it with the starting state
		db := rawdb.NewMemoryDatabase()
		rawdb.WriteHeader(db, genesis)

		// Create a skeleton sync and run a cycle; block on the syncStarting
		// hook so startup has completed before attempting the extension.
		wait := make(chan struct{})

		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
		skeleton.syncStarting = func() { close(wait) }
		skeleton.Sync(tt.head, true)

		<-wait
		// force=false: the syncer may refuse the new head with errReorgDenied.
		if err := skeleton.Sync(tt.extend, false); err != tt.err {
			t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
		}
		skeleton.Terminate()

		// Ensure the correct resulting sync status
		var progress skeletonProgress
		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)

		if len(progress.Subchains) != len(tt.newstate) {
			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
			continue
		}
		for j := 0; j < len(progress.Subchains); j++ {
			if progress.Subchains[j].Head != tt.newstate[j].Head {
				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
			}
			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
			}
		}
	}
}
   513  
   514  // Tests that the skeleton sync correctly retrieves headers from one or more
   515  // peers without duplicates or other strange side effects.
   516  func TestSkeletonSyncRetrievals(t *testing.T) {
   517  	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
   518  
   519  	// Since skeleton headers don't need to be meaningful, beyond a parent hash
   520  	// progression, create a long fake chain to test with.
   521  	chain := []*types.Header{{Number: big.NewInt(0)}}
   522  	for i := 1; i < 10000; i++ {
   523  		chain = append(chain, &types.Header{
   524  			ParentHash: chain[i-1].Hash(),
   525  			Number:     big.NewInt(int64(i)),
   526  		})
   527  	}
   528  	tests := []struct {
   529  		headers  []*types.Header // Database content (beside the genesis)
   530  		oldstate []*subchain     // Old sync state with various interrupted subchains
   531  
   532  		head     *types.Header       // New head header to announce to reorg to
   533  		peers    []*skeletonTestPeer // Initial peer set to start the sync with
   534  		midstate []*subchain         // Expected sync state after initial cycle
   535  		midserve uint64              // Expected number of header retrievals after initial cycle
   536  		middrop  uint64              // Expected number of peers dropped after initial cycle
   537  
   538  		newHead  *types.Header     // New header to anoint on top of the old one
   539  		newPeer  *skeletonTestPeer // New peer to join the skeleton syncer
   540  		endstate []*subchain       // Expected sync state after the post-init event
   541  		endserve uint64            // Expected number of header retrievals after the post-init event
   542  		enddrop  uint64            // Expected number of peers dropped after the post-init event
   543  	}{
   544  		// Completely empty database with only the genesis set. The sync is expected
   545  		// to create a single subchain with the requested head. No peers however, so
   546  		// the sync should be stuck without any progression.
   547  		//
   548  		// When a new peer is added, it should detect the join and fill the headers
   549  		// to the genesis block.
   550  		{
   551  			head:     chain[len(chain)-1],
   552  			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},
   553  
   554  			newPeer:  newSkeletonTestPeer("test-peer", chain),
   555  			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   556  			endserve: uint64(len(chain) - 2), // len - head - genesis
   557  		},
   558  		// Completely empty database with only the genesis set. The sync is expected
   559  		// to create a single subchain with the requested head. With one valid peer,
   560  		// the sync is expected to complete already in the initial round.
   561  		//
   562  		// Adding a second peer should not have any effect.
   563  		{
   564  			head:     chain[len(chain)-1],
   565  			peers:    []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
   566  			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   567  			midserve: uint64(len(chain) - 2), // len - head - genesis
   568  
   569  			newPeer:  newSkeletonTestPeer("test-peer-2", chain),
   570  			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   571  			endserve: uint64(len(chain) - 2), // len - head - genesis
   572  		},
   573  		// Completely empty database with only the genesis set. The sync is expected
   574  		// to create a single subchain with the requested head. With many valid peers,
   575  		// the sync is expected to complete already in the initial round.
   576  		//
   577  		// Adding a new peer should not have any effect.
   578  		{
   579  			head: chain[len(chain)-1],
   580  			peers: []*skeletonTestPeer{
   581  				newSkeletonTestPeer("test-peer-1", chain),
   582  				newSkeletonTestPeer("test-peer-2", chain),
   583  				newSkeletonTestPeer("test-peer-3", chain),
   584  			},
   585  			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   586  			midserve: uint64(len(chain) - 2), // len - head - genesis
   587  
   588  			newPeer:  newSkeletonTestPeer("test-peer-4", chain),
   589  			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
   590  			endserve: uint64(len(chain) - 2), // len - head - genesis
   591  		},
   592  		// This test checks if a peer tries to withhold a header - *on* the sync
   593  		// boundary - instead of sending the requested amount. The malicious short
   594  		// package should not be accepted.
   595  		//
   596  		// Joining with a new peer should however unblock the sync.
   597  		{
   598  			head: chain[requestHeaders+100],
   599  			peers: []*skeletonTestPeer{
   600  				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
   601  			},
   602  			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   603  			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
   604  			middrop:  1,                        // penalize shortened header deliveries
   605  
   606  			newPeer:  newSkeletonTestPeer("good-peer", chain),
   607  			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   608  			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
   609  			enddrop:  1,                                      // no new drops
   610  		},
   611  		// This test checks if a peer tries to withhold a header - *off* the sync
   612  		// boundary - instead of sending the requested amount. The malicious short
   613  		// package should not be accepted.
   614  		//
   615  		// Joining with a new peer should however unblock the sync.
   616  		{
   617  			head: chain[requestHeaders+100],
   618  			peers: []*skeletonTestPeer{
   619  				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
   620  			},
   621  			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   622  			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
   623  			middrop:  1,                        // penalize shortened header deliveries
   624  
   625  			newPeer:  newSkeletonTestPeer("good-peer", chain),
   626  			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   627  			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
   628  			enddrop:  1,                                      // no new drops
   629  		},
   630  		// This test checks if a peer tries to duplicate a header - *on* the sync
   631  		// boundary - instead of sending the correct sequence. The malicious duped
   632  		// package should not be accepted.
   633  		//
   634  		// Joining with a new peer should however unblock the sync.
   635  		{
   636  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   637  			peers: []*skeletonTestPeer{
   638  				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
   639  			},
   640  			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   641  			midserve: requestHeaders + 101 - 2, // len - head - genesis
   642  			middrop:  1,                        // penalize invalid header sequences
   643  
   644  			newPeer:  newSkeletonTestPeer("good-peer", chain),
   645  			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   646  			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   647  			enddrop:  1,                                      // no new drops
   648  		},
   649  		// This test checks if a peer tries to duplicate a header - *off* the sync
   650  		// boundary - instead of sending the correct sequence. The malicious duped
   651  		// package should not be accepted.
   652  		//
   653  		// Joining with a new peer should however unblock the sync.
   654  		{
   655  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   656  			peers: []*skeletonTestPeer{
   657  				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
   658  			},
   659  			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   660  			midserve: requestHeaders + 101 - 2, // len - head - genesis
   661  			middrop:  1,                        // penalize invalid header sequences
   662  
   663  			newPeer:  newSkeletonTestPeer("good-peer", chain),
   664  			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   665  			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   666  			enddrop:  1,                                      // no new drops
   667  		},
   668  		// This test checks if a peer tries to inject a different header - *on*
   669  		// the sync boundary - instead of sending the correct sequence. The bad
   670  		// package should not be accepted.
   671  		//
   672  		// Joining with a new peer should however unblock the sync.
   673  		{
   674  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   675  			peers: []*skeletonTestPeer{
   676  				newSkeletonTestPeer("header-changer",
   677  					append(
   678  						append(
   679  							append([]*types.Header{}, chain[:99]...),
   680  							&types.Header{
   681  								ParentHash: chain[98].Hash(),
   682  								Number:     big.NewInt(int64(99)),
   683  								GasLimit:   1,
   684  							},
   685  						), chain[100:]...,
   686  					),
   687  				),
   688  			},
   689  			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   690  			midserve: requestHeaders + 101 - 2, // len - head - genesis
   691  			middrop:  1,                        // different set of headers, drop // TODO(karalabe): maybe just diff sync?
   692  
   693  			newPeer:  newSkeletonTestPeer("good-peer", chain),
   694  			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   695  			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   696  			enddrop:  1,                                      // no new drops
   697  		},
   698  		// This test checks if a peer tries to inject a different header - *off*
   699  		// the sync boundary - instead of sending the correct sequence. The bad
   700  		// package should not be accepted.
   701  		//
   702  		// Joining with a new peer should however unblock the sync.
   703  		{
   704  			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
   705  			peers: []*skeletonTestPeer{
   706  				newSkeletonTestPeer("header-changer",
   707  					append(
   708  						append(
   709  							append([]*types.Header{}, chain[:50]...),
   710  							&types.Header{
   711  								ParentHash: chain[49].Hash(),
   712  								Number:     big.NewInt(int64(50)),
   713  								GasLimit:   1,
   714  							},
   715  						), chain[51:]...,
   716  					),
   717  				),
   718  			},
   719  			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
   720  			midserve: requestHeaders + 101 - 2, // len - head - genesis
   721  			middrop:  1,                        // different set of headers, drop
   722  
   723  			newPeer:  newSkeletonTestPeer("good-peer", chain),
   724  			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
   725  			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
   726  			enddrop:  1,                                      // no new drops
   727  		},
   728  		// This test reproduces a bug caught during review (kudos to @holiman)
   729  		// where a subchain is merged with a previously interrupted one, causing
   730  		// pending data in the scratch space to become "invalid" (since we jump
   731  		// ahead during subchain merge). In that case it is expected to ignore
   732  		// the queued up data instead of trying to process on top of a shifted
   733  		// task set.
   734  		//
   735  		// The test is a bit convoluted since it needs to trigger a concurrency
   736  		// issue. First we sync up an initial chain of 2x512 items. Then announce
   737  		// 2x512+2 as head and delay delivering the head batch to fill the scratch
   738  		// space first. The delivery head should merge with the previous download
   739  		// and the scratch space must not be consumed further.
   740  		{
   741  			head: chain[2*requestHeaders],
   742  			peers: []*skeletonTestPeer{
   743  				newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
   744  					if origin == chain[2*requestHeaders+1].Number.Uint64() {
   745  						time.Sleep(100 * time.Millisecond)
   746  					}
   747  					return nil // Fallback to default behavior, just delayed
   748  				}),
   749  				newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
   750  					if origin == chain[2*requestHeaders+1].Number.Uint64() {
   751  						time.Sleep(100 * time.Millisecond)
   752  					}
   753  					return nil // Fallback to default behavior, just delayed
   754  				}),
   755  			},
   756  			midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
   757  			midserve: 2*requestHeaders - 1, // len - head - genesis
   758  
   759  			newHead:  chain[2*requestHeaders+2],
   760  			endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
   761  			endserve: 4 * requestHeaders,
   762  		},
   763  	}
   764  	for i, tt := range tests {
   765  		// Create a fresh database and initialize it with the starting state
   766  		db := rawdb.NewMemoryDatabase()
   767  		rawdb.WriteHeader(db, chain[0])
   768  
   769  		// Create a peer set to feed headers through
   770  		peerset := newPeerSet()
   771  		for _, peer := range tt.peers {
   772  			peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
   773  		}
   774  		// Create a peer dropper to track malicious peers
   775  		dropped := make(map[string]int)
   776  		drop := func(peer string) {
   777  			if p := peerset.Peer(peer); p != nil {
   778  				atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
   779  			}
   780  			peerset.Unregister(peer)
   781  			dropped[peer]++
   782  		}
   783  		// Create a skeleton sync and run a cycle
   784  		skeleton := newSkeleton(db, peerset, drop, newHookedBackfiller())
   785  		skeleton.Sync(tt.head, true)
   786  
   787  		var progress skeletonProgress
   788  		// Wait a bit (bleah) for the initial sync loop to go to idle. This might
   789  		// be either a finish or a never-start hence why there's no event to hook.
   790  		check := func() error {
   791  			if len(progress.Subchains) != len(tt.midstate) {
   792  				return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate))
   793  			}
   794  			for j := 0; j < len(progress.Subchains); j++ {
   795  				if progress.Subchains[j].Head != tt.midstate[j].Head {
   796  					return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head)
   797  				}
   798  				if progress.Subchains[j].Tail != tt.midstate[j].Tail {
   799  					return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail)
   800  				}
   801  			}
   802  			return nil
   803  		}
   804  
   805  		waitStart := time.Now()
   806  		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
   807  			time.Sleep(waitTime)
   808  			// Check the post-init end state if it matches the required results
   809  			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
   810  			if err := check(); err == nil {
   811  				break
   812  			}
   813  		}
   814  		if err := check(); err != nil {
   815  			t.Error(err)
   816  			continue
   817  		}
   818  		var served uint64
   819  		for _, peer := range tt.peers {
   820  			served += atomic.LoadUint64(&peer.served)
   821  		}
   822  		if served != tt.midserve {
   823  			t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
   824  		}
   825  		var drops uint64
   826  		for _, peer := range tt.peers {
   827  			drops += atomic.LoadUint64(&peer.dropped)
   828  		}
   829  		if drops != tt.middrop {
   830  			t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
   831  		}
   832  		// Apply the post-init events if there's any
   833  		if tt.newHead != nil {
   834  			skeleton.Sync(tt.newHead, true)
   835  		}
   836  		if tt.newPeer != nil {
   837  			if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
   838  				t.Errorf("test %d: failed to register new peer: %v", i, err)
   839  			}
   840  		}
   841  		// Wait a bit (bleah) for the second sync loop to go to idle. This might
   842  		// be either a finish or a never-start hence why there's no event to hook.
   843  		check = func() error {
   844  			if len(progress.Subchains) != len(tt.endstate) {
   845  				return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate))
   846  			}
   847  			for j := 0; j < len(progress.Subchains); j++ {
   848  				if progress.Subchains[j].Head != tt.endstate[j].Head {
   849  					return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head)
   850  				}
   851  				if progress.Subchains[j].Tail != tt.endstate[j].Tail {
   852  					return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail)
   853  				}
   854  			}
   855  			return nil
   856  		}
   857  		waitStart = time.Now()
   858  		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 {
   859  			time.Sleep(waitTime)
   860  			// Check the post-init end state if it matches the required results
   861  			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
   862  			if err := check(); err == nil {
   863  				break
   864  			}
   865  		}
   866  		if err := check(); err != nil {
   867  			t.Error(err)
   868  			continue
   869  		}
   870  		// Check that the peers served no more headers than we actually needed
   871  		served = 0
   872  		for _, peer := range tt.peers {
   873  			served += atomic.LoadUint64(&peer.served)
   874  		}
   875  		if tt.newPeer != nil {
   876  			served += atomic.LoadUint64(&tt.newPeer.served)
   877  		}
   878  		if served != tt.endserve {
   879  			t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
   880  		}
   881  		drops = 0
   882  		for _, peer := range tt.peers {
   883  			drops += atomic.LoadUint64(&peer.dropped)
   884  		}
   885  		if tt.newPeer != nil {
   886  			drops += atomic.LoadUint64(&tt.newPeer.dropped)
   887  		}
   888  		if drops != tt.middrop {
   889  			t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
   890  		}
   891  		// Clean up any leftover skeleton sync resources
   892  		skeleton.Terminate()
   893  	}
   894  }