github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/eth/protocols/snap/sync_test.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sort"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"golang.org/x/crypto/sha3"
    31  
    32  	"github.com/scroll-tech/go-ethereum/common"
    33  	"github.com/scroll-tech/go-ethereum/core/rawdb"
    34  	"github.com/scroll-tech/go-ethereum/core/types"
    35  	"github.com/scroll-tech/go-ethereum/crypto"
    36  	"github.com/scroll-tech/go-ethereum/crypto/codehash"
    37  	"github.com/scroll-tech/go-ethereum/ethdb"
    38  	"github.com/scroll-tech/go-ethereum/light"
    39  	"github.com/scroll-tech/go-ethereum/log"
    40  	"github.com/scroll-tech/go-ethereum/rlp"
    41  	"github.com/scroll-tech/go-ethereum/trie"
    42  )
    43  
    44  func TestHashing(t *testing.T) {
    45  	t.Parallel()
    46  
    47  	var bytecodes = make([][]byte, 10)
    48  	for i := 0; i < len(bytecodes); i++ {
    49  		buf := make([]byte, 100)
    50  		rand.Read(buf)
    51  		bytecodes[i] = buf
    52  	}
    53  	var want, got string
    54  	var old = func() {
    55  		hasher := sha3.NewLegacyKeccak256()
    56  		for i := 0; i < len(bytecodes); i++ {
    57  			hasher.Reset()
    58  			hasher.Write(bytecodes[i])
    59  			hash := hasher.Sum(nil)
    60  			got = fmt.Sprintf("%v\n%v", got, hash)
    61  		}
    62  	}
    63  	var new = func() {
    64  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    65  		var hash = make([]byte, 32)
    66  		for i := 0; i < len(bytecodes); i++ {
    67  			hasher.Reset()
    68  			hasher.Write(bytecodes[i])
    69  			hasher.Read(hash)
    70  			want = fmt.Sprintf("%v\n%v", want, hash)
    71  		}
    72  	}
    73  	old()
    74  	new()
    75  	if want != got {
    76  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    77  	}
    78  }
    79  
    80  func BenchmarkHashing(b *testing.B) {
    81  	var bytecodes = make([][]byte, 10000)
    82  	for i := 0; i < len(bytecodes); i++ {
    83  		buf := make([]byte, 100)
    84  		rand.Read(buf)
    85  		bytecodes[i] = buf
    86  	}
    87  	var old = func() {
    88  		hasher := sha3.NewLegacyKeccak256()
    89  		for i := 0; i < len(bytecodes); i++ {
    90  			hasher.Reset()
    91  			hasher.Write(bytecodes[i])
    92  			hasher.Sum(nil)
    93  		}
    94  	}
    95  	var new = func() {
    96  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    97  		var hash = make([]byte, 32)
    98  		for i := 0; i < len(bytecodes); i++ {
    99  			hasher.Reset()
   100  			hasher.Write(bytecodes[i])
   101  			hasher.Read(hash)
   102  		}
   103  	}
   104  	b.Run("old", func(b *testing.B) {
   105  		b.ReportAllocs()
   106  		for i := 0; i < b.N; i++ {
   107  			old()
   108  		}
   109  	})
   110  	b.Run("new", func(b *testing.B) {
   111  		b.ReportAllocs()
   112  		for i := 0; i < b.N; i++ {
   113  			new()
   114  		}
   115  	})
   116  }
   117  
   118  type (
   119  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   120  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   121  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   122  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   123  )
   124  
   125  type testPeer struct {
   126  	id            string
   127  	test          *testing.T
   128  	remote        *Syncer
   129  	logger        log.Logger
   130  	accountTrie   *trie.Trie
   131  	accountValues entrySlice
   132  	storageTries  map[common.Hash]*trie.Trie
   133  	storageValues map[common.Hash]entrySlice
   134  
   135  	accountRequestHandler accountHandlerFunc
   136  	storageRequestHandler storageHandlerFunc
   137  	trieRequestHandler    trieHandlerFunc
   138  	codeRequestHandler    codeHandlerFunc
   139  	term                  func()
   140  
   141  	// counters
   142  	nAccountRequests  int
   143  	nStorageRequests  int
   144  	nBytecodeRequests int
   145  	nTrienodeRequests int
   146  }
   147  
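         // newTestPeer creates a testPeer with the default (well-behaved) request handlers installed.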
   148  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   149  	peer := &testPeer{
   150  		id:                    id,
   151  		test:                  t,
   152  		logger:                log.New("id", id),
   153  		accountRequestHandler: defaultAccountRequestHandler,
   154  		trieRequestHandler:    defaultTrieRequestHandler,
   155  		storageRequestHandler: defaultStorageRequestHandler,
   156  		codeRequestHandler:    defaultCodeRequestHandler,
   157  		term:                  term,
   158  	}
   159  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   160  	//peer.logger.SetHandler(stderrHandler)
   161  	return peer
   162  }
   163  
   164  func (t *testPeer) ID() string      { return t.id }
   165  func (t *testPeer) Log() log.Logger { return t.logger }
   166  
   167  func (t *testPeer) Stats() string {
   168  	return fmt.Sprintf(`Account requests: %d
   169  Storage requests: %d
   170  Bytecode requests: %d
   171  Trienode requests: %d
   172  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   173  }
   174  
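         // RequestAccountRange records the request and forwards it asynchronously to the peer's
         // configurable accountRequestHandler.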
   175  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   176  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   177  	t.nAccountRequests++
   178  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   179  	return nil
   180  }
   181  
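         // RequestTrieNodes records the request and forwards it asynchronously to the peer's
         // configurable trieRequestHandler.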
   182  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   183  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   184  	t.nTrienodeRequests++
   185  	go t.trieRequestHandler(t, id, root, paths, bytes)
   186  	return nil
   187  }
   188  
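         // RequestStorageRanges records the request and forwards it asynchronously to the peer's
         // configurable storageRequestHandler.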
   189  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   190  	t.nStorageRequests++
   191  	if len(accounts) == 1 && origin != nil {
   192  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   193  	} else {
   194  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   195  	}
   196  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   197  	return nil
   198  }
   199  
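         // RequestByteCodes records the request and forwards it asynchronously to the peer's
         // configurable codeRequestHandler.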
   200  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   201  	t.nBytecodeRequests++
   202  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   203  	go t.codeRequestHandler(t, id, hashes, bytes)
   204  	return nil
   205  }
   206  
    207  // defaultTrieRequestHandler is a well-behaved handler for trie healing requests
   208  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   209  	// Pass the response
   210  	var nodes [][]byte
   211  	for _, pathset := range paths {
   212  		switch len(pathset) {
   213  		case 1:
   214  			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
   215  			if err != nil {
   216  				t.logger.Info("Error handling req", "error", err)
   217  				break
   218  			}
   219  			nodes = append(nodes, blob)
   220  		default:
   221  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   222  			for _, path := range pathset[1:] {
   223  				blob, _, err := account.TryGetNode(path)
   224  				if err != nil {
   225  					t.logger.Info("Error handling req", "error", err)
   226  					break
   227  				}
   228  				nodes = append(nodes, blob)
   229  			}
   230  		}
   231  	}
   232  	t.remote.OnTrieNodes(t, requestId, nodes)
   233  	return nil
   234  }
   235  
    236  // defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
   237  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   238  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   239  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   240  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   241  		t.term()
   242  		return err
   243  	}
   244  	return nil
   245  }
   246  
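         // createAccountRequestResponse assembles the keys, values and boundary proofs for an
         // account range request, capping the response at roughly cap bytes.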
   247  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   248  	var size uint64
   249  	if limit == (common.Hash{}) {
   250  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   251  	}
   252  	for _, entry := range t.accountValues {
   253  		if size > cap {
   254  			break
   255  		}
   256  		if bytes.Compare(origin[:], entry.k) <= 0 {
   257  			keys = append(keys, common.BytesToHash(entry.k))
   258  			vals = append(vals, entry.v)
   259  			size += uint64(32 + len(entry.v))
   260  		}
    261  		// If we've reached the range limit, abort
   262  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   263  			break
   264  		}
   265  	}
   266  	// Unless we send the entire trie, we need to supply proofs
   267  	// Actually, we need to supply proofs either way! This seems to be an implementation
   268  	// quirk in go-ethereum
   269  	proof := light.NewNodeSet()
   270  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   271  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   272  	}
   273  	if len(keys) > 0 {
   274  		lastK := (keys[len(keys)-1])[:]
   275  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   276  			t.logger.Error("Could not prove last item", "error", err)
   277  		}
   278  	}
   279  	for _, blob := range proof.NodeList() {
   280  		proofs = append(proofs, blob)
   281  	}
   282  	return keys, vals, proofs
   283  }
   284  
    285  // defaultStorageRequestHandler is a well-behaved storage request handler
   286  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   287  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   288  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   289  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   290  		t.term()
   291  	}
   292  	return nil
   293  }
   294  
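         // defaultCodeRequestHandler is a well-behaved handler for bytecode requests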
   295  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   296  	var bytecodes [][]byte
   297  	for _, h := range hashes {
   298  		bytecodes = append(bytecodes, getCodeByHash(h))
   299  	}
   300  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   301  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   302  		t.term()
   303  	}
   304  	return nil
   305  }
   306  
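         // createStorageRequestResponse assembles the storage slots and boundary proofs for a storage
         // range request, capping the response at roughly max bytes and adding proofs when the
         // response is capped or starts at a non-zero origin.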
   307  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   308  	var size uint64
   309  	for _, account := range accounts {
   310  		// The first account might start from a different origin and end sooner
   311  		var originHash common.Hash
   312  		if len(origin) > 0 {
   313  			originHash = common.BytesToHash(origin)
   314  		}
   315  		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   316  		if len(limit) > 0 {
   317  			limitHash = common.BytesToHash(limit)
   318  		}
   319  		var (
   320  			keys  []common.Hash
   321  			vals  [][]byte
   322  			abort bool
   323  		)
   324  		for _, entry := range t.storageValues[account] {
   325  			if size >= max {
   326  				abort = true
   327  				break
   328  			}
   329  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   330  				continue
   331  			}
   332  			keys = append(keys, common.BytesToHash(entry.k))
   333  			vals = append(vals, entry.v)
   334  			size += uint64(32 + len(entry.v))
   335  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   336  				break
   337  			}
   338  		}
   339  		hashes = append(hashes, keys)
   340  		slots = append(slots, vals)
   341  
    342  		// Generate the Merkle proofs for the first and last storage slot, but
    343  		// only if the response was capped. If the entire storage trie is included
    344  		// in the response, there is no need for any proofs.
   345  		if originHash != (common.Hash{}) || abort {
   346  			// If we're aborting, we need to prove the first and last item
   347  			// This terminates the response (and thus the loop)
   348  			proof := light.NewNodeSet()
   349  			stTrie := t.storageTries[account]
   350  
   351  			// Here's a potential gotcha: when constructing the proof, we cannot
   352  			// use the 'origin' slice directly, but must use the full 32-byte
   353  			// hash form.
   354  			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
   355  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   356  			}
   357  			if len(keys) > 0 {
   358  				lastK := (keys[len(keys)-1])[:]
   359  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   360  					t.logger.Error("Could not prove last item", "error", err)
   361  				}
   362  			}
   363  			for _, blob := range proof.NodeList() {
   364  				proofs = append(proofs, blob)
   365  			}
   366  			break
   367  		}
   368  	}
   369  	return hashes, slots, proofs
   370  }
   371  
    372  // createStorageRequestResponseAlwaysProve tests a corner case, where it always
    373  // supplies the proof for the last account, even if it is 'complete'.
   374  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   375  	var size uint64
   376  	max = max * 3 / 4
   377  
   378  	var origin common.Hash
   379  	if len(bOrigin) > 0 {
   380  		origin = common.BytesToHash(bOrigin)
   381  	}
   382  	var exit bool
   383  	for i, account := range accounts {
   384  		var keys []common.Hash
   385  		var vals [][]byte
   386  		for _, entry := range t.storageValues[account] {
   387  			if bytes.Compare(entry.k, origin[:]) < 0 {
   388  				exit = true
   389  			}
   390  			keys = append(keys, common.BytesToHash(entry.k))
   391  			vals = append(vals, entry.v)
   392  			size += uint64(32 + len(entry.v))
   393  			if size > max {
   394  				exit = true
   395  			}
   396  		}
   397  		if i == len(accounts)-1 {
   398  			exit = true
   399  		}
   400  		hashes = append(hashes, keys)
   401  		slots = append(slots, vals)
   402  
   403  		if exit {
   404  			// If we're aborting, we need to prove the first and last item
   405  			// This terminates the response (and thus the loop)
   406  			proof := light.NewNodeSet()
   407  			stTrie := t.storageTries[account]
   408  
   409  			// Here's a potential gotcha: when constructing the proof, we cannot
   410  			// use the 'origin' slice directly, but must use the full 32-byte
   411  			// hash form.
   412  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   413  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   414  					"error", err)
   415  			}
   416  			if len(keys) > 0 {
   417  				lastK := (keys[len(keys)-1])[:]
   418  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   419  					t.logger.Error("Could not prove last item", "error", err)
   420  				}
   421  			}
   422  			for _, blob := range proof.NodeList() {
   423  				proofs = append(proofs, blob)
   424  			}
   425  			break
   426  		}
   427  	}
   428  	return hashes, slots, proofs
   429  }
   430  
    431  // emptyRequestAccountRangeFn rejects AccountRangeRequests with an empty response
   432  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   433  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   434  	return nil
   435  }
   436  
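         // nonResponsiveRequestAccountRangeFn simulates a peer that never answers account range requests.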
   437  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   438  	return nil
   439  }
   440  
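         // emptyTrieRequestHandler responds to trie node requests with an empty reply.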
   441  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   442  	t.remote.OnTrieNodes(t, requestId, nil)
   443  	return nil
   444  }
   445  
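         // nonResponsiveTrieRequestHandler simulates a peer that never answers trie node requests.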
   446  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   447  	return nil
   448  }
   449  
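         // emptyStorageRequestHandler responds to storage range requests with an empty reply.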
   450  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   451  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   452  	return nil
   453  }
   454  
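         // nonResponsiveStorageRequestHandler simulates a peer that never answers storage range requests.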
   455  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   456  	return nil
   457  }
   458  
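         // proofHappyStorageRequestHandler always attaches boundary proofs, even for complete storage tries.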
   459  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   460  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   461  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   462  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   463  		t.term()
   464  	}
   465  	return nil
   466  }
   467  
   468  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   469  //	var bytecodes [][]byte
   470  //	t.remote.OnByteCodes(t, id, bytecodes)
   471  //	return nil
   472  //}
   473  
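         // corruptCodeRequestHandler delivers the requested hashes themselves instead of the bytecodes.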
   474  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   475  	var bytecodes [][]byte
   476  	for _, h := range hashes {
   477  		// Send back the hashes
   478  		bytecodes = append(bytecodes, h[:])
   479  	}
   480  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   481  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   482  		// Mimic the real-life handler, which drops a peer on errors
   483  		t.remote.Unregister(t.id)
   484  	}
   485  	return nil
   486  }
   487  
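         // cappedCodeRequestHandler only delivers the first requested bytecode, leaving the rest to be re-requested.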
   488  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   489  	var bytecodes [][]byte
   490  	for _, h := range hashes[:1] {
   491  		bytecodes = append(bytecodes, getCodeByHash(h))
   492  	}
   493  	// Missing bytecode can be retrieved again, no error expected
   494  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   495  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   496  		t.term()
   497  	}
   498  	return nil
   499  }
   500  
    501  // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to be very small
   502  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   503  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   504  }
   505  
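         // starvingAccountRequestHandler is a somewhat well-behaved account handler, but it caps the returned results to be very small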
   506  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   507  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   508  }
   509  
   510  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   511  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   512  //}
   513  
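         // corruptAccountRequestHandler drops the first proof node from an otherwise valid account range response.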
   514  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   515  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   516  	if len(proofs) > 0 {
   517  		proofs = proofs[1:]
   518  	}
   519  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   520  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   521  		// Mimic the real-life handler, which drops a peer on errors
   522  		t.remote.Unregister(t.id)
   523  	}
   524  	return nil
   525  }
   526  
   527  // corruptStorageRequestHandler doesn't provide good proofs
   528  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   529  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   530  	if len(proofs) > 0 {
   531  		proofs = proofs[1:]
   532  	}
   533  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   534  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   535  		// Mimic the real-life handler, which drops a peer on errors
   536  		t.remote.Unregister(t.id)
   537  	}
   538  	return nil
   539  }
   540  
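         // noProofStorageRequestHandler delivers storage ranges without any Merkle proofs.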
   541  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   542  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   543  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   544  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   545  		// Mimic the real-life handler, which drops a peer on errors
   546  		t.remote.Unregister(t.id)
   547  	}
   548  	return nil
   549  }
   550  
   551  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   552  // also ship the entire trie inside the proof. If the attack is successful,
   553  // the remote side does not do any follow-up requests
   554  func TestSyncBloatedProof(t *testing.T) {
   555  	t.Parallel()
   556  
   557  	var (
   558  		once   sync.Once
   559  		cancel = make(chan struct{})
   560  		term   = func() {
   561  			once.Do(func() {
   562  				close(cancel)
   563  			})
   564  		}
   565  	)
   566  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   567  	source := newTestPeer("source", t, term)
   568  	source.accountTrie = sourceAccountTrie
   569  	source.accountValues = elems
   570  
   571  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   572  		var (
   573  			proofs [][]byte
   574  			keys   []common.Hash
   575  			vals   [][]byte
   576  		)
   577  		// The values
   578  		for _, entry := range t.accountValues {
   579  			if bytes.Compare(entry.k, origin[:]) < 0 {
   580  				continue
   581  			}
   582  			if bytes.Compare(entry.k, limit[:]) > 0 {
   583  				continue
   584  			}
   585  			keys = append(keys, common.BytesToHash(entry.k))
   586  			vals = append(vals, entry.v)
   587  		}
   588  		// The proofs
   589  		proof := light.NewNodeSet()
   590  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   591  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   592  		}
   593  		// The bloat: add proof of every single element
   594  		for _, entry := range t.accountValues {
   595  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   596  				t.logger.Error("Could not prove item", "error", err)
   597  			}
   598  		}
   599  		// And remove one item from the elements
   600  		if len(keys) > 2 {
   601  			keys = append(keys[:1], keys[2:]...)
   602  			vals = append(vals[:1], vals[2:]...)
   603  		}
   604  		for _, blob := range proof.NodeList() {
   605  			proofs = append(proofs, blob)
   606  		}
   607  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   608  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   609  			t.term()
   610  			// This is actually correct, signal to exit the test successfully
   611  		}
   612  		return nil
   613  	}
   614  	syncer := setupSyncer(source)
   615  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   616  		t.Fatal("No error returned from incomplete/cancelled sync")
   617  	}
   618  }
   619  
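         // setupSyncer creates a Syncer on an in-memory database and registers the given test peers with it.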
   620  func setupSyncer(peers ...*testPeer) *Syncer {
   621  	stateDb := rawdb.NewMemoryDatabase()
   622  	syncer := NewSyncer(stateDb)
   623  	for _, peer := range peers {
   624  		syncer.Register(peer)
   625  		peer.remote = syncer
   626  	}
   627  	return syncer
   628  }
   629  
   630  // TestSync tests a basic sync with one peer
   631  func TestSync(t *testing.T) {
   632  	t.Parallel()
   633  
   634  	var (
   635  		once   sync.Once
   636  		cancel = make(chan struct{})
   637  		term   = func() {
   638  			once.Do(func() {
   639  				close(cancel)
   640  			})
   641  		}
   642  	)
   643  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   644  
   645  	mkSource := func(name string) *testPeer {
   646  		source := newTestPeer(name, t, term)
   647  		source.accountTrie = sourceAccountTrie
   648  		source.accountValues = elems
   649  		return source
   650  	}
   651  	syncer := setupSyncer(mkSource("source"))
   652  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   653  		t.Fatalf("sync failed: %v", err)
   654  	}
   655  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   656  }
   657  
   658  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   659  // panic within the prover
   660  func TestSyncTinyTriePanic(t *testing.T) {
   661  	t.Parallel()
   662  
   663  	var (
   664  		once   sync.Once
   665  		cancel = make(chan struct{})
   666  		term   = func() {
   667  			once.Do(func() {
   668  				close(cancel)
   669  			})
   670  		}
   671  	)
   672  	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   673  
   674  	mkSource := func(name string) *testPeer {
   675  		source := newTestPeer(name, t, term)
   676  		source.accountTrie = sourceAccountTrie
   677  		source.accountValues = elems
   678  		return source
   679  	}
   680  	syncer := setupSyncer(mkSource("source"))
   681  	done := checkStall(t, term)
   682  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   683  		t.Fatalf("sync failed: %v", err)
   684  	}
   685  	close(done)
   686  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   687  }
   688  
   689  // TestMultiSync tests a basic sync with multiple peers
   690  func TestMultiSync(t *testing.T) {
   691  	t.Parallel()
   692  
   693  	var (
   694  		once   sync.Once
   695  		cancel = make(chan struct{})
   696  		term   = func() {
   697  			once.Do(func() {
   698  				close(cancel)
   699  			})
   700  		}
   701  	)
   702  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   703  
   704  	mkSource := func(name string) *testPeer {
   705  		source := newTestPeer(name, t, term)
   706  		source.accountTrie = sourceAccountTrie
   707  		source.accountValues = elems
   708  		return source
   709  	}
   710  	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
   711  	done := checkStall(t, term)
   712  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   713  		t.Fatalf("sync failed: %v", err)
   714  	}
   715  	close(done)
   716  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   717  }
   718  
    719  // TestSyncWithStorage tests basic sync using accounts + storage + code
   720  func TestSyncWithStorage(t *testing.T) {
   721  	t.Parallel()
   722  
   723  	var (
   724  		once   sync.Once
   725  		cancel = make(chan struct{})
   726  		term   = func() {
   727  			once.Do(func() {
   728  				close(cancel)
   729  			})
   730  		}
   731  	)
   732  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
   733  
   734  	mkSource := func(name string) *testPeer {
   735  		source := newTestPeer(name, t, term)
   736  		source.accountTrie = sourceAccountTrie
   737  		source.accountValues = elems
   738  		source.storageTries = storageTries
   739  		source.storageValues = storageElems
   740  		return source
   741  	}
   742  	syncer := setupSyncer(mkSource("sourceA"))
   743  	done := checkStall(t, term)
   744  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   745  		t.Fatalf("sync failed: %v", err)
   746  	}
   747  	close(done)
   748  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   749  }
   750  
    751  // TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
   752  func TestMultiSyncManyUseless(t *testing.T) {
   753  	t.Parallel()
   754  
   755  	var (
   756  		once   sync.Once
   757  		cancel = make(chan struct{})
   758  		term   = func() {
   759  			once.Do(func() {
   760  				close(cancel)
   761  			})
   762  		}
   763  	)
   764  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   765  
   766  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   767  		source := newTestPeer(name, t, term)
   768  		source.accountTrie = sourceAccountTrie
   769  		source.accountValues = elems
   770  		source.storageTries = storageTries
   771  		source.storageValues = storageElems
   772  
   773  		if !noAccount {
   774  			source.accountRequestHandler = emptyRequestAccountRangeFn
   775  		}
   776  		if !noStorage {
   777  			source.storageRequestHandler = emptyStorageRequestHandler
   778  		}
   779  		if !noTrieNode {
   780  			source.trieRequestHandler = emptyTrieRequestHandler
   781  		}
   782  		return source
   783  	}
   784  
   785  	syncer := setupSyncer(
   786  		mkSource("full", true, true, true),
   787  		mkSource("noAccounts", false, true, true),
   788  		mkSource("noStorage", true, false, true),
   789  		mkSource("noTrie", true, true, false),
   790  	)
   791  	done := checkStall(t, term)
   792  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   793  		t.Fatalf("sync failed: %v", err)
   794  	}
   795  	close(done)
   796  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   797  }
   798  
    799  // TestMultiSyncManyUselessWithLowTimeout is like TestMultiSyncManyUseless, but uses a very low request timeout
   800  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   801  	var (
   802  		once   sync.Once
   803  		cancel = make(chan struct{})
   804  		term   = func() {
   805  			once.Do(func() {
   806  				close(cancel)
   807  			})
   808  		}
   809  	)
   810  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   811  
   812  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   813  		source := newTestPeer(name, t, term)
   814  		source.accountTrie = sourceAccountTrie
   815  		source.accountValues = elems
   816  		source.storageTries = storageTries
   817  		source.storageValues = storageElems
   818  
   819  		if !noAccount {
   820  			source.accountRequestHandler = emptyRequestAccountRangeFn
   821  		}
   822  		if !noStorage {
   823  			source.storageRequestHandler = emptyStorageRequestHandler
   824  		}
   825  		if !noTrieNode {
   826  			source.trieRequestHandler = emptyTrieRequestHandler
   827  		}
   828  		return source
   829  	}
   830  
   831  	syncer := setupSyncer(
   832  		mkSource("full", true, true, true),
   833  		mkSource("noAccounts", false, true, true),
   834  		mkSource("noStorage", true, false, true),
   835  		mkSource("noTrie", true, true, false),
   836  	)
   837  	// We're setting the timeout to very low, to increase the chance of the timeout
   838  	// being triggered. This was previously a cause of panic, when a response
   839  	// arrived simultaneously as a timeout was triggered.
   840  	syncer.rates.OverrideTTLLimit = time.Millisecond
   841  
   842  	done := checkStall(t, term)
   843  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   844  		t.Fatalf("sync failed: %v", err)
   845  	}
   846  	close(done)
   847  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   848  }
   849  
    850  // TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
   851  func TestMultiSyncManyUnresponsive(t *testing.T) {
   852  	var (
   853  		once   sync.Once
   854  		cancel = make(chan struct{})
   855  		term   = func() {
   856  			once.Do(func() {
   857  				close(cancel)
   858  			})
   859  		}
   860  	)
   861  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   862  
   863  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   864  		source := newTestPeer(name, t, term)
   865  		source.accountTrie = sourceAccountTrie
   866  		source.accountValues = elems
   867  		source.storageTries = storageTries
   868  		source.storageValues = storageElems
   869  
   870  		if !noAccount {
   871  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   872  		}
   873  		if !noStorage {
   874  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   875  		}
   876  		if !noTrieNode {
   877  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   878  		}
   879  		return source
   880  	}
   881  
   882  	syncer := setupSyncer(
   883  		mkSource("full", true, true, true),
   884  		mkSource("noAccounts", false, true, true),
   885  		mkSource("noStorage", true, false, true),
   886  		mkSource("noTrie", true, true, false),
   887  	)
   888  	// We're setting the timeout to very low, to make the test run a bit faster
   889  	syncer.rates.OverrideTTLLimit = time.Millisecond
   890  
   891  	done := checkStall(t, term)
   892  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   893  		t.Fatalf("sync failed: %v", err)
   894  	}
   895  	close(done)
   896  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   897  }
   898  
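         // checkStall starts a watchdog that terminates the sync if it hasn't finished within a minute;
         // closing the returned channel disarms the watchdog.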
   899  func checkStall(t *testing.T, term func()) chan struct{} {
   900  	testDone := make(chan struct{})
   901  	go func() {
   902  		select {
   903  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   904  			t.Log("Sync stalled")
   905  			term()
   906  		case <-testDone:
   907  			return
   908  		}
   909  	}()
   910  	return testDone
   911  }
   912  
   913  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   914  // account trie has a few boundary elements.
   915  func TestSyncBoundaryAccountTrie(t *testing.T) {
   916  	t.Parallel()
   917  
   918  	var (
   919  		once   sync.Once
   920  		cancel = make(chan struct{})
   921  		term   = func() {
   922  			once.Do(func() {
   923  				close(cancel)
   924  			})
   925  		}
   926  	)
   927  	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
   928  
   929  	mkSource := func(name string) *testPeer {
   930  		source := newTestPeer(name, t, term)
   931  		source.accountTrie = sourceAccountTrie
   932  		source.accountValues = elems
   933  		return source
   934  	}
   935  	syncer := setupSyncer(
   936  		mkSource("peer-a"),
   937  		mkSource("peer-b"),
   938  	)
   939  	done := checkStall(t, term)
   940  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   941  		t.Fatalf("sync failed: %v", err)
   942  	}
   943  	close(done)
   944  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   945  }
   946  
   947  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   948  // consistently returning very small results
   949  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   950  	t.Parallel()
   951  
   952  	var (
   953  		once   sync.Once
   954  		cancel = make(chan struct{})
   955  		term   = func() {
   956  			once.Do(func() {
   957  				close(cancel)
   958  			})
   959  		}
   960  	)
   961  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   962  
   963  	mkSource := func(name string, slow bool) *testPeer {
   964  		source := newTestPeer(name, t, term)
   965  		source.accountTrie = sourceAccountTrie
   966  		source.accountValues = elems
   967  
   968  		if slow {
   969  			source.accountRequestHandler = starvingAccountRequestHandler
   970  		}
   971  		return source
   972  	}
   973  
   974  	syncer := setupSyncer(
   975  		mkSource("nice-a", false),
   976  		mkSource("nice-b", false),
   977  		mkSource("nice-c", false),
   978  		mkSource("capped", true),
   979  	)
   980  	done := checkStall(t, term)
   981  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   982  		t.Fatalf("sync failed: %v", err)
   983  	}
   984  	close(done)
   985  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   986  }
   987  
    988  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer that doesn't deliver
    989  // code request responses properly.
   990  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
   991  	t.Parallel()
   992  
   993  	var (
   994  		once   sync.Once
   995  		cancel = make(chan struct{})
   996  		term   = func() {
   997  			once.Do(func() {
   998  				close(cancel)
   999  			})
  1000  		}
  1001  	)
  1002  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1003  
  1004  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1005  		source := newTestPeer(name, t, term)
  1006  		source.accountTrie = sourceAccountTrie
  1007  		source.accountValues = elems
  1008  		source.codeRequestHandler = codeFn
  1009  		return source
  1010  	}
   1011  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1012  	// chance that the full set of requested codes is sent only to the
   1013  	// non-corrupt peer, which delivers everything in one go, and makes the
   1014  	// test moot
  1015  	syncer := setupSyncer(
  1016  		mkSource("capped", cappedCodeRequestHandler),
  1017  		mkSource("corrupt", corruptCodeRequestHandler),
  1018  	)
  1019  	done := checkStall(t, term)
  1020  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1021  		t.Fatalf("sync failed: %v", err)
  1022  	}
  1023  	close(done)
  1024  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1025  }
  1026  
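         // TestSyncNoStorageAndOneAccountCorruptPeer has one peer that delivers account range
         // responses with corrupt (truncated) proofs.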
  1027  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1028  	t.Parallel()
  1029  
  1030  	var (
  1031  		once   sync.Once
  1032  		cancel = make(chan struct{})
  1033  		term   = func() {
  1034  			once.Do(func() {
  1035  				close(cancel)
  1036  			})
  1037  		}
  1038  	)
  1039  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1040  
  1041  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1042  		source := newTestPeer(name, t, term)
  1043  		source.accountTrie = sourceAccountTrie
  1044  		source.accountValues = elems
  1045  		source.accountRequestHandler = accFn
  1046  		return source
  1047  	}
   1048  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1049  	// chance that the full set of requested accounts is sent only to the
   1050  	// non-corrupt peer, which delivers everything in one go, and makes the
   1051  	// test moot
  1052  	syncer := setupSyncer(
  1053  		mkSource("capped", defaultAccountRequestHandler),
  1054  		mkSource("corrupt", corruptAccountRequestHandler),
  1055  	)
  1056  	done := checkStall(t, term)
  1057  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1058  		t.Fatalf("sync failed: %v", err)
  1059  	}
  1060  	close(done)
  1061  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1062  }
  1063  
   1064  // TestSyncNoStorageAndOneCodeCappedPeer has one peer that delivers bytecodes
   1065  // one by one
  1066  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1067  	t.Parallel()
  1068  
  1069  	var (
  1070  		once   sync.Once
  1071  		cancel = make(chan struct{})
  1072  		term   = func() {
  1073  			once.Do(func() {
  1074  				close(cancel)
  1075  			})
  1076  		}
  1077  	)
  1078  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1079  
  1080  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1081  		source := newTestPeer(name, t, term)
  1082  		source.accountTrie = sourceAccountTrie
  1083  		source.accountValues = elems
  1084  		source.codeRequestHandler = codeFn
  1085  		return source
  1086  	}
  1087  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1088  	// so it shouldn't be more than that
  1089  	var counter int
  1090  	syncer := setupSyncer(
  1091  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1092  			counter++
  1093  			return cappedCodeRequestHandler(t, id, hashes, max)
  1094  		}),
  1095  	)
  1096  	done := checkStall(t, term)
  1097  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1098  		t.Fatalf("sync failed: %v", err)
  1099  	}
  1100  	close(done)
  1101  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1102  	// deduplication is per request batch. If it were a perfect global dedup,
  1103  	// we would expect only 8 requests. If there were no dedup, there would be
  1104  	// 3k requests.
  1105  	// We expect somewhere below 100 requests for these 8 unique hashes.
  1106  	if threshold := 100; counter > threshold {
  1107  		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
  1108  	}
  1109  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1110  }
  1111  
  1112  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1113  // storage trie has a few boundary elements.
  1114  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1115  	t.Parallel()
  1116  
  1117  	var (
  1118  		once   sync.Once
  1119  		cancel = make(chan struct{})
  1120  		term   = func() {
  1121  			once.Do(func() {
  1122  				close(cancel)
  1123  			})
  1124  		}
  1125  	)
  1126  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
  1127  
  1128  	mkSource := func(name string) *testPeer {
  1129  		source := newTestPeer(name, t, term)
  1130  		source.accountTrie = sourceAccountTrie
  1131  		source.accountValues = elems
  1132  		source.storageTries = storageTries
  1133  		source.storageValues = storageElems
  1134  		return source
  1135  	}
  1136  	syncer := setupSyncer(
  1137  		mkSource("peer-a"),
  1138  		mkSource("peer-b"),
  1139  	)
  1140  	done := checkStall(t, term)
  1141  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1142  		t.Fatalf("sync failed: %v", err)
  1143  	}
  1144  	close(done)
  1145  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1146  }
  1147  
  1148  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1149  // consistently returning very small results
  1150  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1151  	t.Parallel()
  1152  
  1153  	var (
  1154  		once   sync.Once
  1155  		cancel = make(chan struct{})
  1156  		term   = func() {
  1157  			once.Do(func() {
  1158  				close(cancel)
  1159  			})
  1160  		}
  1161  	)
  1162  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
  1163  
  1164  	mkSource := func(name string, slow bool) *testPeer {
  1165  		source := newTestPeer(name, t, term)
  1166  		source.accountTrie = sourceAccountTrie
  1167  		source.accountValues = elems
  1168  		source.storageTries = storageTries
  1169  		source.storageValues = storageElems
  1170  
  1171  		if slow {
  1172  			source.storageRequestHandler = starvingStorageRequestHandler
  1173  		}
  1174  		return source
  1175  	}
  1176  
  1177  	syncer := setupSyncer(
  1178  		mkSource("nice-a", false),
  1179  		mkSource("slow", true),
  1180  	)
  1181  	done := checkStall(t, term)
  1182  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1183  		t.Fatalf("sync failed: %v", err)
  1184  	}
  1185  	close(done)
  1186  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1187  }
  1188  
  1189  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1190  // sometimes sending bad proofs
  1191  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1192  	t.Parallel()
  1193  
  1194  	var (
  1195  		once   sync.Once
  1196  		cancel = make(chan struct{})
  1197  		term   = func() {
  1198  			once.Do(func() {
  1199  				close(cancel)
  1200  			})
  1201  		}
  1202  	)
  1203  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1204  
  1205  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1206  		source := newTestPeer(name, t, term)
  1207  		source.accountTrie = sourceAccountTrie
  1208  		source.accountValues = elems
  1209  		source.storageTries = storageTries
  1210  		source.storageValues = storageElems
  1211  		source.storageRequestHandler = handler
  1212  		return source
  1213  	}
  1214  
  1215  	syncer := setupSyncer(
  1216  		mkSource("nice-a", defaultStorageRequestHandler),
  1217  		mkSource("nice-b", defaultStorageRequestHandler),
  1218  		mkSource("nice-c", defaultStorageRequestHandler),
  1219  		mkSource("corrupt", corruptStorageRequestHandler),
  1220  	)
  1221  	done := checkStall(t, term)
  1222  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1223  		t.Fatalf("sync failed: %v", err)
  1224  	}
  1225  	close(done)
  1226  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1227  }
  1228  
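         // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where one
         // peer delivers storage ranges without any proofs.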
  1229  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1230  	t.Parallel()
  1231  
  1232  	var (
  1233  		once   sync.Once
  1234  		cancel = make(chan struct{})
  1235  		term   = func() {
  1236  			once.Do(func() {
  1237  				close(cancel)
  1238  			})
  1239  		}
  1240  	)
  1241  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1242  
  1243  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1244  		source := newTestPeer(name, t, term)
  1245  		source.accountTrie = sourceAccountTrie
  1246  		source.accountValues = elems
  1247  		source.storageTries = storageTries
  1248  		source.storageValues = storageElems
  1249  		source.storageRequestHandler = handler
  1250  		return source
  1251  	}
  1252  	syncer := setupSyncer(
  1253  		mkSource("nice-a", defaultStorageRequestHandler),
  1254  		mkSource("nice-b", defaultStorageRequestHandler),
  1255  		mkSource("nice-c", defaultStorageRequestHandler),
  1256  		mkSource("corrupt", noProofStorageRequestHandler),
  1257  	)
  1258  	done := checkStall(t, term)
  1259  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1260  		t.Fatalf("sync failed: %v", err)
  1261  	}
  1262  	close(done)
  1263  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1264  }
  1265  
   1266  // TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
  1267  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1268  // an error, where the recipient erroneously clipped the boundary nodes, but
  1269  // did not mark the account for healing.
  1270  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1271  	t.Parallel()
  1272  	var (
  1273  		once   sync.Once
  1274  		cancel = make(chan struct{})
  1275  		term   = func() {
  1276  			once.Do(func() {
  1277  				close(cancel)
  1278  			})
  1279  		}
  1280  	)
  1281  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
  1282  
  1283  	mkSource := func(name string) *testPeer {
  1284  		source := newTestPeer(name, t, term)
  1285  		source.accountTrie = sourceAccountTrie
  1286  		source.accountValues = elems
  1287  		source.storageTries = storageTries
  1288  		source.storageValues = storageElems
  1289  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1290  		return source
  1291  	}
  1292  	syncer := setupSyncer(mkSource("sourceA"))
  1293  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1294  		t.Fatalf("sync failed: %v", err)
  1295  	}
  1296  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1297  }
  1298  
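         // kv is a key/value tuple representing a single trie entry in the tests.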
  1299  type kv struct {
  1300  	k, v []byte
  1301  }
  1302  
  1303  // Some helpers for sorting
  1304  type entrySlice []*kv
  1305  
  1306  func (p entrySlice) Len() int           { return len(p) }
  1307  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
  1308  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
  1309  
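         // key32 returns a 32-byte key with i encoded into its first 8 bytes in little-endian order.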
  1310  func key32(i uint64) []byte {
  1311  	key := make([]byte, 32)
  1312  	binary.LittleEndian.PutUint64(key, i)
  1313  	return key
  1314  }
  1315  
  1316  var (
  1317  	keccakCodehashes = []common.Hash{
  1318  		crypto.Keccak256Hash([]byte{0}),
  1319  		crypto.Keccak256Hash([]byte{1}),
  1320  		crypto.Keccak256Hash([]byte{2}),
  1321  		crypto.Keccak256Hash([]byte{3}),
  1322  		crypto.Keccak256Hash([]byte{4}),
  1323  		crypto.Keccak256Hash([]byte{5}),
  1324  		crypto.Keccak256Hash([]byte{6}),
  1325  		crypto.Keccak256Hash([]byte{7}),
  1326  	}
  1327  
  1328  	poseidonCodehashes = []common.Hash{
  1329  		codehash.PoseidonCodeHash([]byte{0}),
  1330  		codehash.PoseidonCodeHash([]byte{1}),
  1331  		codehash.PoseidonCodeHash([]byte{2}),
  1332  		codehash.PoseidonCodeHash([]byte{3}),
  1333  		codehash.PoseidonCodeHash([]byte{4}),
  1334  		codehash.PoseidonCodeHash([]byte{5}),
  1335  		codehash.PoseidonCodeHash([]byte{6}),
  1336  		codehash.PoseidonCodeHash([]byte{7}),
  1337  	}
  1338  )
  1339  
   1340  // getKeccakCodeHash returns a code hash picked deterministically from a small fixed set
  1341  func getKeccakCodeHash(i uint64) []byte {
  1342  	h := keccakCodehashes[int(i)%len(keccakCodehashes)]
  1343  	return common.CopyBytes(h[:])
  1344  }
  1345  
   1346  // getPoseidonCodeHash returns a code hash picked deterministically from a small fixed set
  1347  func getPoseidonCodeHash(i uint64) []byte {
  1348  	h := poseidonCodehashes[int(i)%len(poseidonCodehashes)]
  1349  	return common.CopyBytes(h[:])
  1350  }
  1351  
   1352  // getCodeByHash is a convenience function to look up the code from the code hash
  1353  func getCodeByHash(hash common.Hash) []byte {
  1354  	if hash == emptyKeccakCodeHash {
  1355  		return nil
  1356  	}
  1357  	for i, h := range keccakCodehashes {
  1358  		if h == hash {
  1359  			return []byte{byte(i)}
  1360  		}
  1361  	}
  1362  	return nil
  1363  }
  1364  
   1365  // makeAccountTrieNoStorage spits out a trie, along with the leaves
  1366  func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
  1367  	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
  1368  	accTrie, _ := trie.New(common.Hash{}, db)
  1369  	var entries entrySlice
  1370  	for i := uint64(1); i <= uint64(n); i++ {
  1371  		value, _ := rlp.EncodeToBytes(types.StateAccount{
  1372  			Nonce:            i,
  1373  			Balance:          big.NewInt(int64(i)),
  1374  			Root:             emptyRoot,
  1375  			KeccakCodeHash:   getKeccakCodeHash(i),
  1376  			PoseidonCodeHash: getPoseidonCodeHash(i),
  1377  			CodeSize:         1,
  1378  		})
  1379  		key := key32(i)
  1380  		elem := &kv{key, value}
  1381  		accTrie.Update(elem.k, elem.v)
  1382  		entries = append(entries, elem)
  1383  	}
  1384  	sort.Sort(entries)
  1385  	accTrie.Commit(nil)
  1386  	return accTrie, entries
  1387  }
  1388  
  1389  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1390  // accounts normally, this function will fill a few accounts which have
   1391  // a boundary hash.
  1392  func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
  1393  	var (
  1394  		entries    entrySlice
  1395  		boundaries []common.Hash
  1396  
  1397  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1398  		trie, _ = trie.New(common.Hash{}, db)
  1399  	)
  1400  	// Initialize boundaries
  1401  	var next common.Hash
  1402  	step := new(big.Int).Sub(
  1403  		new(big.Int).Div(
  1404  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1405  			big.NewInt(int64(accountConcurrency)),
  1406  		), common.Big1,
  1407  	)
  1408  	for i := 0; i < accountConcurrency; i++ {
  1409  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1410  		if i == accountConcurrency-1 {
  1411  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1412  		}
  1413  		boundaries = append(boundaries, last)
  1414  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1415  	}
  1416  	// Fill boundary accounts
  1417  	for i := 0; i < len(boundaries); i++ {
  1418  		value, _ := rlp.EncodeToBytes(types.StateAccount{
  1419  			Nonce:            uint64(0),
  1420  			Balance:          big.NewInt(int64(i)),
  1421  			Root:             emptyRoot,
  1422  			KeccakCodeHash:   getKeccakCodeHash(uint64(i)),
  1423  			PoseidonCodeHash: getPoseidonCodeHash(uint64(i)),
  1424  			CodeSize:         1,
  1425  		})
  1426  		elem := &kv{boundaries[i].Bytes(), value}
  1427  		trie.Update(elem.k, elem.v)
  1428  		entries = append(entries, elem)
  1429  	}
  1430  	// Fill other accounts if required
  1431  	for i := uint64(1); i <= uint64(n); i++ {
  1432  		value, _ := rlp.EncodeToBytes(types.StateAccount{
  1433  			Nonce:            i,
  1434  			Balance:          big.NewInt(int64(i)),
  1435  			Root:             emptyRoot,
  1436  			KeccakCodeHash:   getKeccakCodeHash(i),
  1437  			PoseidonCodeHash: getPoseidonCodeHash(i),
  1438  			CodeSize:         1,
  1439  		})
  1440  		elem := &kv{key32(i), value}
  1441  		trie.Update(elem.k, elem.v)
  1442  		entries = append(entries, elem)
  1443  	}
  1444  	sort.Sort(entries)
  1445  	trie.Commit(nil)
  1446  	return trie, entries
  1447  }
  1448  
   1449  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1450  // has a unique storage set.
  1451  func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1452  	var (
  1453  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1454  		accTrie, _     = trie.New(common.Hash{}, db)
  1455  		entries        entrySlice
  1456  		storageTries   = make(map[common.Hash]*trie.Trie)
  1457  		storageEntries = make(map[common.Hash]entrySlice)
  1458  	)
  1459  	// Create the requested number of accounts, each with its own storage trie
  1460  	for i := uint64(1); i <= uint64(accounts); i++ {
  1461  		key := key32(i)
  1462  		keccakCodehash := emptyKeccakCodeHash[:]
  1463  		poseidonCodeHash := emptyPoseidonCodeHash[:]
  1464  		if code {
  1465  			keccakCodehash = getKeccakCodeHash(i)
  1466  			poseidonCodeHash = getPoseidonCodeHash(i)
  1467  		}
  1468  		// Create a storage trie
  1469  		stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
  1470  		stRoot := stTrie.Hash()
  1471  		stTrie.Commit(nil)
  1472  		value, _ := rlp.EncodeToBytes(types.StateAccount{
  1473  			Nonce:            i,
  1474  			Balance:          big.NewInt(int64(i)),
  1475  			Root:             stRoot,
  1476  			KeccakCodeHash:   keccakCodehash,
  1477  			PoseidonCodeHash: poseidonCodeHash,
  1478  			CodeSize:         1,
  1479  		})
  1480  		elem := &kv{key, value}
  1481  		accTrie.Update(elem.k, elem.v)
  1482  		entries = append(entries, elem)
  1483  
  1484  		storageTries[common.BytesToHash(key)] = stTrie
  1485  		storageEntries[common.BytesToHash(key)] = stEntries
  1486  	}
  1487  	sort.Sort(entries)
  1488  
  1489  	accTrie.Commit(nil)
  1490  	return accTrie, entries, storageTries, storageEntries
  1491  }
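
// Illustrative sketch: demonstrates the property the helper above is named
// for -- every account ends up with its own distinct storage root, because each
// storage trie is seeded with the account index. The account and slot counts
// are illustrative assumptions.
func TestUniqueStorageRootsSketch(t *testing.T) {
	_, _, storageTries, _ := makeAccountTrieWithStorageWithUniqueStorage(5, 3, false)

	seen := make(map[common.Hash]struct{})
	for account, stTrie := range storageTries {
		root := stTrie.Hash()
		if _, ok := seen[root]; ok {
			t.Errorf("account %x reuses storage root %x", account, root)
		}
		seen[root] = struct{}{}
	}
}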
  1492  
  1493  // makeAccountTrieWithStorage returns an account trie along with its leaves; every account references one shared storage trie
  1494  func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1495  	var (
  1496  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1497  		accTrie, _     = trie.New(common.Hash{}, db)
  1498  		entries        entrySlice
  1499  		storageTries   = make(map[common.Hash]*trie.Trie)
  1500  		storageEntries = make(map[common.Hash]entrySlice)
  1501  	)
  1502  	// Make a storage trie which we reuse for the whole lot
  1503  	var (
  1504  		stTrie    *trie.Trie
  1505  		stEntries entrySlice
  1506  	)
  1507  	if boundary {
  1508  		stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
  1509  	} else {
  1510  		stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
  1511  	}
  1512  	stRoot := stTrie.Hash()
  1513  
  1514  	// Create the requested number of accounts, all referencing the shared storage trie
  1515  	for i := uint64(1); i <= uint64(accounts); i++ {
  1516  		key := key32(i)
  1517  		keccakCodehash := emptyKeccakCodeHash[:]
  1518  		poseidonCodeHash := emptyPoseidonCodeHash[:]
  1519  		if code {
  1520  			keccakCodehash = getKeccakCodeHash(i)
  1521  			poseidonCodeHash = getPoseidonCodeHash(i)
  1522  		}
  1523  		value, _ := rlp.EncodeToBytes(types.StateAccount{
  1524  			Nonce:            i,
  1525  			Balance:          big.NewInt(int64(i)),
  1526  			Root:             stRoot,
  1527  			KeccakCodeHash:   keccakCodehash,
  1528  			PoseidonCodeHash: poseidonCodeHash,
  1529  			CodeSize:         1,
  1530  		})
  1531  		elem := &kv{key, value}
  1532  		accTrie.Update(elem.k, elem.v)
  1533  		entries = append(entries, elem)
  1534  		// We reuse the same storage trie for all accounts
  1535  		storageTries[common.BytesToHash(key)] = stTrie
  1536  		storageEntries[common.BytesToHash(key)] = stEntries
  1537  	}
  1538  	sort.Sort(entries)
  1539  	stTrie.Commit(nil)
  1540  	accTrie.Commit(nil)
  1541  	return accTrie, entries, storageTries, storageEntries
  1542  }
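
// Illustrative sketch: the contrasting case to the unique-storage helper --
// makeAccountTrieWithStorage reuses a single storage trie, so every account
// reports the same storage root. The counts and flags below are illustrative
// assumptions.
func TestSharedStorageRootSketch(t *testing.T) {
	_, entries, storageTries, _ := makeAccountTrieWithStorage(3, 4, false, false)

	if have, want := len(entries), 3; have != want {
		t.Errorf("account count: have %d, want %d", have, want)
	}
	var shared common.Hash
	for _, stTrie := range storageTries {
		if shared == (common.Hash{}) {
			shared = stTrie.Hash()
			continue
		}
		if have := stTrie.Hash(); have != shared {
			t.Errorf("storage root mismatch: have %x, want %x", have, shared)
		}
	}
}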
  1543  
  1544  // makeStorageTrieWithSeed fills a storage trie with n items, commits it to the
  1545  // given trie database and returns it along with the sorted entries. The seed can
  1546  // be used to ensure that the resulting tries are unique.
  1547  func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
  1548  	trie, _ := trie.New(common.Hash{}, db)
  1549  	var entries entrySlice
  1550  	for i := uint64(1); i <= n; i++ {
  1551  		// store value 'i + seed' at slot 'i' (keyed by the slot's keccak hash)
  1552  		slotValue := key32(i + seed)
  1553  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1554  
  1555  		slotKey := key32(i)
  1556  		key := crypto.Keccak256Hash(slotKey[:])
  1557  
  1558  		elem := &kv{key[:], rlpSlotValue}
  1559  		trie.Update(elem.k, elem.v)
  1560  		entries = append(entries, elem)
  1561  	}
  1562  	sort.Sort(entries)
  1563  	trie.Commit(nil)
  1564  	return trie, entries
  1565  }
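
// Illustrative sketch: shows the effect of the seed parameter documented above.
// Equal slot counts with different seeds yield different storage roots, while
// repeating a seed reproduces the same root. The shared in-memory database and
// the chosen seeds are illustrative assumptions.
func TestStorageTrieSeedSketch(t *testing.T) {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())

	a, _ := makeStorageTrieWithSeed(8, 1, db)
	b, _ := makeStorageTrieWithSeed(8, 2, db)
	c, _ := makeStorageTrieWithSeed(8, 1, db)

	if a.Hash() == b.Hash() {
		t.Error("different seeds produced identical storage roots")
	}
	if a.Hash() != c.Hash() {
		t.Error("equal seeds produced different storage roots")
	}
}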
  1566  
  1567  // makeBoundaryStorageTrie constructs a storage trie. In addition to filling
  1568  // storage slots normally, it inserts a few slots whose keys are exactly the
  1569  // boundary hashes used to split the storage sync into chunks.
  1570  func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
  1571  	var (
  1572  		entries    entrySlice
  1573  		boundaries []common.Hash
  1574  		trie, _    = trie.New(common.Hash{}, db)
  1575  	)
  1576  	// Initialize boundaries
  1577  	var next common.Hash
  1578  	step := new(big.Int).Sub(
  1579  		new(big.Int).Div(
  1580  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1581  			big.NewInt(int64(accountConcurrency)),
  1582  		), common.Big1,
  1583  	)
  1584  	for i := 0; i < accountConcurrency; i++ {
  1585  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1586  		if i == accountConcurrency-1 {
  1587  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1588  		}
  1589  		boundaries = append(boundaries, last)
  1590  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1591  	}
  1592  	// Fill boundary slots
  1593  	for i := 0; i < len(boundaries); i++ {
  1594  		key := boundaries[i]
  1595  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1596  
  1597  		elem := &kv{key[:], val}
  1598  		trie.Update(elem.k, elem.v)
  1599  		entries = append(entries, elem)
  1600  	}
  1601  	// Fill other slots if required
  1602  	for i := uint64(1); i <= uint64(n); i++ {
  1603  		slotKey := key32(i)
  1604  		key := crypto.Keccak256Hash(slotKey[:])
  1605  
  1606  		slotValue := key32(i)
  1607  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1608  
  1609  		elem := &kv{key[:], rlpSlotValue}
  1610  		trie.Update(elem.k, elem.v)
  1611  		entries = append(entries, elem)
  1612  	}
  1613  	sort.Sort(entries)
  1614  	trie.Commit(nil)
  1615  	return trie, entries
  1616  }
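
// Illustrative sketch: mirrors the boundary-account check further up -- the
// helper above emits one slot per sync-chunk boundary plus the n regular slots,
// sorted by key. The chosen n is an illustrative assumption.
func TestMakeBoundaryStorageTrieSketch(t *testing.T) {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	_, entries := makeBoundaryStorageTrie(7, db)

	if have, want := len(entries), accountConcurrency+7; have != want {
		t.Errorf("entry count: have %d, want %d", have, want)
	}
	if !sort.IsSorted(entries) {
		t.Error("entries are not sorted by key")
	}
}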
  1617  
  1618  func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1619  	t.Helper()
  1620  	triedb := trie.NewDatabase(db)
  1621  	accTrie, err := trie.New(root, triedb)
  1622  	if err != nil {
  1623  		t.Fatal(err)
  1624  	}
  1625  	accounts, slots := 0, 0
  1626  	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
  1627  	for accIt.Next() {
  1628  		var acc struct {
  1629  			Nonce            uint64
  1630  			Balance          *big.Int
  1631  			Root             common.Hash
  1632  			KeccakCodeHash   []byte
  1633  			PoseidonCodeHash []byte
  1634  			CodeSize         uint64
  1635  		}
  1636  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1637  			log.Crit("Invalid account encountered during trie verification", "err", err)
  1638  		}
  1639  		accounts++
  1640  		if acc.Root != emptyRoot {
  1641  			storeTrie, err := trie.NewSecure(acc.Root, triedb)
  1642  			if err != nil {
  1643  				t.Fatal(err)
  1644  			}
  1645  			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
  1646  			for storeIt.Next() {
  1647  				slots++
  1648  			}
  1649  			if err := storeIt.Err; err != nil {
  1650  				t.Fatal(err)
  1651  			}
  1652  		}
  1653  	}
  1654  	if err := accIt.Err; err != nil {
  1655  		t.Fatal(err)
  1656  	}
  1657  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1658  }
  1659  
  1660  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1661  // minimizing state healing.
  1662  func TestSyncAccountPerformance(t *testing.T) {
  1663  	// Set the account concurrency to 1. This _should_ result in the
  1664  	// range root becoming correct, so that no healing is needed.
  1665  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1666  	accountConcurrency = 1
  1667  
  1668  	var (
  1669  		once   sync.Once
  1670  		cancel = make(chan struct{})
  1671  		term   = func() {
  1672  			once.Do(func() {
  1673  				close(cancel)
  1674  			})
  1675  		}
  1676  	)
  1677  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
  1678  
  1679  	mkSource := func(name string) *testPeer {
  1680  		source := newTestPeer(name, t, term)
  1681  		source.accountTrie = sourceAccountTrie
  1682  		source.accountValues = elems
  1683  		return source
  1684  	}
  1685  	src := mkSource("source")
  1686  	syncer := setupSyncer(src)
  1687  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1688  		t.Fatalf("sync failed: %v", err)
  1689  	}
  1690  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1691  	// The trie root will always be requested, since it is added when the snap
  1692  	// sync cycle starts. When popping the queue, we do not look it up again.
  1693  	// Doing so would bring this number down to zero in this artificial testcase,
  1694  	// but only add extra IO for no reason in practice.
  1695  	if have, want := src.nTrienodeRequests, 1; have != want {
  1696  		fmt.Print(src.Stats())
  1697  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1698  	}
  1699  }
  1700  
  1701  func TestSlotEstimation(t *testing.T) {
  1702  	for i, tc := range []struct {
  1703  		last  common.Hash
  1704  		count int
  1705  		want  uint64
  1706  	}{
  1707  		{
  1708  			// Half the space
  1709  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1710  			100,
  1711  			100,
  1712  		},
  1713  		{
  1714  			// 1 / 16th
  1715  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1716  			100,
  1717  			1500,
  1718  		},
  1719  		{
  1720  			// Bit more than 1 / 16th
  1721  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1722  			100,
  1723  			1499,
  1724  		},
  1725  		{
  1726  			// Almost everything
  1727  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1728  			100,
  1729  			6,
  1730  		},
  1731  		{
  1732  			// Almost nothing -- should lead to error
  1733  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1734  			1,
  1735  			0,
  1736  		},
  1737  		{
  1738  			// Nothing -- should lead to error
  1739  			common.Hash{},
  1740  			100,
  1741  			0,
  1742  		},
  1743  	} {
  1744  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1745  		if want := tc.want; have != want {
  1746  			t.Errorf("test %d: have %d want %d", i, have, want)
  1747  		}
  1748  	}
  1749  }
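
// Illustrative sketch: re-derives the "1 / 16th" row of TestSlotEstimation by
// hand. If 100 slots cover the first 1/16th of the hash space, a uniform slot
// density suggests roughly 15*100 = 1500 slots are still missing, which matches
// what estimateRemainingSlots reports for that row. The hand-rolled arithmetic
// below is an illustrative assumption, not the production estimator.
func TestSlotEstimationByHandSketch(t *testing.T) {
	last := common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	count := 100

	// Uniform-density expectation: count * (space - covered) / covered.
	space := new(big.Int).Exp(common.Big2, common.Big256, nil)
	covered := new(big.Int).Add(last.Big(), common.Big1)
	expected := new(big.Int).Div(
		new(big.Int).Mul(big.NewInt(int64(count)), new(big.Int).Sub(space, covered)),
		covered,
	)
	have, err := estimateRemainingSlots(count, last)
	if err != nil {
		t.Fatalf("estimate failed: %v", err)
	}
	if expected.Uint64() != have {
		t.Errorf("hand-computed estimate %d != estimateRemainingSlots %d", expected, have)
	}
}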