github.com/klaytn/klaytn@v1.12.1/node/cn/snap/sync_test.go

     1  // Modifications Copyright 2022 The klaytn Authors
     2  // Copyright 2020 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/protocols/snap/sync_test.go (2022/06/29).
    19  // Modified and improved for the klaytn development.
    20  
    21  package snap
    22  
    23  import (
    24  	"bytes"
    25  	"crypto/rand"
    26  	"encoding/binary"
    27  	"fmt"
    28  	"math/big"
    29  	"sort"
    30  	"sync"
    31  	"testing"
    32  	"time"
    33  
    34  	"github.com/klaytn/klaytn/blockchain/types/account"
    35  	"github.com/klaytn/klaytn/blockchain/types/accountkey"
    36  	"github.com/klaytn/klaytn/common"
    37  	"github.com/klaytn/klaytn/crypto"
    38  	"github.com/klaytn/klaytn/log"
    39  	"github.com/klaytn/klaytn/params"
    40  	"github.com/klaytn/klaytn/rlp"
    41  	"github.com/klaytn/klaytn/storage/database"
    42  	"github.com/klaytn/klaytn/storage/statedb"
    43  	"golang.org/x/crypto/sha3"
    44  )
    45  
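        // genExternallyOwnedAccount creates a legacy-keyed EOA account object with the given nonce and balance.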
    46  func genExternallyOwnedAccount(nonce uint64, balance *big.Int) (account.Account, error) {
    47  	return account.NewAccountWithMap(account.ExternallyOwnedAccountType, map[account.AccountValueKeyType]interface{}{
    48  		account.AccountValueKeyNonce:         nonce,
    49  		account.AccountValueKeyBalance:       balance,
    50  		account.AccountValueKeyHumanReadable: false,
    51  		account.AccountValueKeyAccountKey:    accountkey.NewAccountKeyLegacy(),
    52  	})
    53  }
    54  
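        // genSmartContractAccount creates a smart contract account object with the given nonce, balance, storage root and code hash.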
    55  func genSmartContractAccount(nonce uint64, balance *big.Int, storageRoot common.Hash, codeHash []byte) (account.Account, error) {
    56  	return account.NewAccountWithMap(account.SmartContractAccountType, map[account.AccountValueKeyType]interface{}{
    57  		account.AccountValueKeyNonce:         nonce,
    58  		account.AccountValueKeyBalance:       balance,
    59  		account.AccountValueKeyHumanReadable: false,
    60  		account.AccountValueKeyAccountKey:    accountkey.NewAccountKeyLegacy(),
    61  		account.AccountValueKeyStorageRoot:   storageRoot,
    62  		account.AccountValueKeyCodeHash:      codeHash,
    63  		account.AccountValueKeyCodeInfo:      params.CodeInfo(0),
    64  	})
    65  }
    66  
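        // TestHashing checks that hashing bytecodes through the KeccakState Read interface yields the same digests as the classic Sum-based approach.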
    67  func TestHashing(t *testing.T) {
    68  	t.Parallel()
    69  
    70  	bytecodes := make([][]byte, 10)
    71  	for i := 0; i < len(bytecodes); i++ {
    72  		buf := make([]byte, 100)
    73  		rand.Read(buf)
    74  		bytecodes[i] = buf
    75  	}
    76  	var want, got string
    77  	old := func() {
    78  		hasher := sha3.NewLegacyKeccak256()
    79  		for i := 0; i < len(bytecodes); i++ {
    80  			hasher.Reset()
    81  			hasher.Write(bytecodes[i])
    82  			hash := hasher.Sum(nil)
    83  			got = fmt.Sprintf("%v\n%v", got, hash)
    84  		}
    85  	}
    86  	new := func() {
    87  		hasher := sha3.NewLegacyKeccak256().(statedb.KeccakState)
    88  		hash := make([]byte, 32)
    89  		for i := 0; i < len(bytecodes); i++ {
    90  			hasher.Reset()
    91  			hasher.Write(bytecodes[i])
    92  			hasher.Read(hash)
    93  			want = fmt.Sprintf("%v\n%v", want, hash)
    94  		}
    95  	}
    96  	old()
    97  	new()
    98  	if want != got {
    99  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
   100  	}
   101  }
   102  
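        // BenchmarkHashing compares allocations and speed of Sum-based versus Read-based Keccak256 hashing over a batch of random bytecodes.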
   103  func BenchmarkHashing(b *testing.B) {
   104  	bytecodes := make([][]byte, 10000)
   105  	for i := 0; i < len(bytecodes); i++ {
   106  		buf := make([]byte, 100)
   107  		rand.Read(buf)
   108  		bytecodes[i] = buf
   109  	}
   110  	old := func() {
   111  		hasher := sha3.NewLegacyKeccak256()
   112  		for i := 0; i < len(bytecodes); i++ {
   113  			hasher.Reset()
   114  			hasher.Write(bytecodes[i])
   115  			hasher.Sum(nil)
   116  		}
   117  	}
   118  	new := func() {
   119  		hasher := sha3.NewLegacyKeccak256().(statedb.KeccakState)
   120  		hash := make([]byte, 32)
   121  		for i := 0; i < len(bytecodes); i++ {
   122  			hasher.Reset()
   123  			hasher.Write(bytecodes[i])
   124  			hasher.Read(hash)
   125  		}
   126  	}
   127  	b.Run("old", func(b *testing.B) {
   128  		b.ReportAllocs()
   129  		for i := 0; i < b.N; i++ {
   130  			old()
   131  		}
   132  	})
   133  	b.Run("new", func(b *testing.B) {
   134  		b.ReportAllocs()
   135  		for i := 0; i < b.N; i++ {
   136  			new()
   137  		}
   138  	})
   139  }
   140  
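        // Handler function types used by testPeer to answer the different snap request kinds; tests swap these out to simulate misbehaving peers.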
   141  type (
   142  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   143  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   144  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   145  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   146  )
   147  
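        // testPeer is an in-memory mock of a snap peer. It serves account, storage, trie node and bytecode requests straight from locally held test tries via the pluggable handlers above.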
   148  type testPeer struct {
   149  	id            string
   150  	test          *testing.T
   151  	remote        *Syncer
   152  	logger        log.Logger
   153  	accountTrie   *statedb.Trie
   154  	accountValues entrySlice
   155  	storageTries  map[common.Hash]*statedb.Trie
   156  	storageValues map[common.Hash]entrySlice
   157  
   158  	accountRequestHandler accountHandlerFunc
   159  	storageRequestHandler storageHandlerFunc
   160  	trieRequestHandler    trieHandlerFunc
   161  	codeRequestHandler    codeHandlerFunc
   162  	term                  func()
   163  
   164  	// counters
   165  	nAccountRequests  int
   166  	nStorageRequests  int
   167  	nBytecodeRequests int
   168  	nTrienodeRequests int
   169  }
   170  
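        // newTestPeer creates a testPeer wired up with the default well-behaving request handlers.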
   171  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   172  	peer := &testPeer{
   173  		id:                    id,
   174  		test:                  t,
   175  		logger:                logger.NewWith("id", id),
   176  		accountRequestHandler: defaultAccountRequestHandler,
   177  		trieRequestHandler:    defaultTrieRequestHandler,
   178  		storageRequestHandler: defaultStorageRequestHandler,
   179  		codeRequestHandler:    defaultCodeRequestHandler,
   180  		term:                  term,
   181  	}
   182  	// stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   183  	// peer.logger.SetHandler(stderrHandler)
   184  	return peer
   185  }
   186  
   187  func (t *testPeer) ID() string      { return t.id }
   188  func (t *testPeer) Log() log.Logger { return t.logger }
   189  
   190  func (t *testPeer) Stats() string {
   191  	return fmt.Sprintf(`Account requests: %d
   192  Storage requests: %d
   193  Bytecode requests: %d
   194  Trienode requests: %d
   195  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   196  }
   197  
   198  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   199  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   200  	t.nAccountRequests++
   201  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   202  	return nil
   203  }
   204  
   205  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   206  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   207  	t.nTrienodeRequests++
   208  	go t.trieRequestHandler(t, id, root, paths, bytes)
   209  	return nil
   210  }
   211  
   212  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   213  	t.nStorageRequests++
   214  	if len(accounts) == 1 && origin != nil {
   215  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   216  	} else {
   217  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   218  	}
   219  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   220  	return nil
   221  }
   222  
   223  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   224  	t.nBytecodeRequests++
   225  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   226  	go t.codeRequestHandler(t, id, hashes, bytes)
   227  	return nil
   228  }
   229  
   230  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   231  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   232  	// Pass the response
   233  	var nodes [][]byte
   234  	for _, pathset := range paths {
   235  		switch len(pathset) {
   236  		case 1:
   237  			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
   238  			if err != nil {
   239  				t.logger.Info("Error handling req", "error", err)
   240  				break
   241  			}
   242  			nodes = append(nodes, blob)
   243  		default:
   244  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   245  			for _, path := range pathset[1:] {
   246  				blob, _, err := account.TryGetNode(path)
   247  				if err != nil {
   248  					t.logger.Info("Error handling req", "error", err)
   249  					break
   250  				}
   251  				nodes = append(nodes, blob)
   252  			}
   253  		}
   254  	}
   255  	t.remote.OnTrieNodes(t, requestId, nodes)
   256  	return nil
   257  }
   258  
   259  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   260  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   261  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   262  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   263  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   264  		t.term()
   265  		return err
   266  	}
   267  	return nil
   268  }
   269  
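        // createAccountRequestResponse collects the accounts in [origin, limit] from the peer's sorted account list, capped at roughly 'cap' bytes, and attaches Merkle proofs for the origin and the last returned key.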
   270  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   271  	var size uint64
   272  	if limit == (common.Hash{}) {
   273  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   274  	}
   275  	for _, entry := range t.accountValues {
   276  		if size > cap {
   277  			break
   278  		}
   279  		if bytes.Compare(origin[:], entry.k) <= 0 {
   280  			keys = append(keys, common.BytesToHash(entry.k))
   281  			vals = append(vals, entry.v)
   282  			size += uint64(32 + len(entry.v))
   283  		}
   284  		// If we've exceeded the request threshold, abort
   285  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   286  			break
   287  		}
   288  	}
   289  	// Unless we send the entire trie, we need to supply proofs
   290  	// Actually, we need to supply proofs either way! This seems to be an implementation
   291  	// quirk in go-ethereum
   292  	proof := NewNodeSet()
   293  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   294  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   295  	}
   296  	if len(keys) > 0 {
   297  		lastK := (keys[len(keys)-1])[:]
   298  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   299  			t.logger.Error("Could not prove last item", "error", err)
   300  		}
   301  	}
   302  	for _, blob := range proof.NodeList() {
   303  		proofs = append(proofs, blob)
   304  	}
   305  	return keys, vals, proofs
   306  }
   307  
   308  // defaultStorageRequestHandler is a well-behaving storage request handler
   309  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   310  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   311  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   312  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   313  		t.term()
   314  	}
   315  	return nil
   316  }
   317  
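        // defaultCodeRequestHandler is a well-behaving handler for bytecode requests.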
   318  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   319  	bytecodes := make([][]byte, 0, len(hashes))
   320  	for _, h := range hashes {
   321  		bytecodes = append(bytecodes, getCodeByHash(h))
   322  	}
   323  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   324  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   325  		t.term()
   326  	}
   327  	return nil
   328  }
   329  
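        // createStorageRequestResponse collects storage slots for the requested accounts, capped at roughly 'max' bytes, attaching boundary proofs (and stopping early) when the first account starts at a non-zero origin or the response is capped.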
   330  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   331  	var size uint64
   332  	for _, account := range accounts {
   333  		// The first account might start from a different origin and end sooner
   334  		var originHash common.Hash
   335  		if len(origin) > 0 {
   336  			originHash = common.BytesToHash(origin)
   337  		}
   338  		limitHash := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   339  		if len(limit) > 0 {
   340  			limitHash = common.BytesToHash(limit)
   341  		}
   342  		var (
   343  			keys  []common.Hash
   344  			vals  [][]byte
   345  			abort bool
   346  		)
   347  		for _, entry := range t.storageValues[account] {
   348  			if size >= max {
   349  				abort = true
   350  				break
   351  			}
   352  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   353  				continue
   354  			}
   355  			keys = append(keys, common.BytesToHash(entry.k))
   356  			vals = append(vals, entry.v)
   357  			size += uint64(32 + len(entry.v))
   358  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   359  				break
   360  			}
   361  		}
   362  		hashes = append(hashes, keys)
   363  		slots = append(slots, vals)
   364  
   365  		// Generate the Merkle proofs for the first and last storage slot, but
   366  		// only if the response was capped. If the entire storage trie is included
   367  		// in the response, no need for any proofs.
   368  		if originHash != (common.Hash{}) || abort {
   369  			// If we're aborting, we need to prove the first and last item
   370  			// This terminates the response (and thus the loop)
   371  			proof := NewNodeSet()
   372  			stTrie := t.storageTries[account]
   373  
   374  			// Here's a potential gotcha: when constructing the proof, we cannot
   375  			// use the 'origin' slice directly, but must use the full 32-byte
   376  			// hash form.
   377  			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
   378  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   379  			}
   380  			if len(keys) > 0 {
   381  				lastK := (keys[len(keys)-1])[:]
   382  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   383  					t.logger.Error("Could not prove last item", "error", err)
   384  				}
   385  			}
   386  			for _, blob := range proof.NodeList() {
   387  				proofs = append(proofs, blob)
   388  			}
   389  			break
   390  		}
   391  	}
   392  	return hashes, slots, proofs
   393  }
   394  
   395  // createStorageRequestResponseAlwaysProve tests a corner case, where it always
   396  // supplies the proof for the last account, even if it is 'complete'.
   397  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   398  	var size uint64
   399  	max = max * 3 / 4
   400  
   401  	var origin common.Hash
   402  	if len(bOrigin) > 0 {
   403  		origin = common.BytesToHash(bOrigin)
   404  	}
   405  	var exit bool
   406  	for i, account := range accounts {
   407  		var keys []common.Hash
   408  		var vals [][]byte
   409  		for _, entry := range t.storageValues[account] {
   410  			if bytes.Compare(entry.k, origin[:]) < 0 {
   411  				exit = true
   412  			}
   413  			keys = append(keys, common.BytesToHash(entry.k))
   414  			vals = append(vals, entry.v)
   415  			size += uint64(32 + len(entry.v))
   416  			if size > max {
   417  				exit = true
   418  			}
   419  		}
   420  		if i == len(accounts)-1 {
   421  			exit = true
   422  		}
   423  		hashes = append(hashes, keys)
   424  		slots = append(slots, vals)
   425  
   426  		if exit {
   427  			// If we're aborting, we need to prove the first and last item
   428  			// This terminates the response (and thus the loop)
   429  			proof := NewNodeSet()
   430  			stTrie := t.storageTries[account]
   431  
   432  			// Here's a potential gotcha: when constructing the proof, we cannot
   433  			// use the 'origin' slice directly, but must use the full 32-byte
   434  			// hash form.
   435  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   436  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   437  					"error", err)
   438  			}
   439  			if len(keys) > 0 {
   440  				lastK := (keys[len(keys)-1])[:]
   441  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   442  					t.logger.Error("Could not prove last item", "error", err)
   443  				}
   444  			}
   445  			for _, blob := range proof.NodeList() {
   446  				proofs = append(proofs, blob)
   447  			}
   448  			break
   449  		}
   450  	}
   451  	return hashes, slots, proofs
   452  }
   453  
   454  // emptyRequestAccountRangeFn rejects AccountRangeRequests by replying with an empty response
   455  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   456  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   457  	return nil
   458  }
   459  
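        // nonResponsiveRequestAccountRangeFn silently drops AccountRangeRequests without ever replying.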
   460  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   461  	return nil
   462  }
   463  
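        // emptyTrieRequestHandler answers trie node requests with an empty response.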
   464  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   465  	t.remote.OnTrieNodes(t, requestId, nil)
   466  	return nil
   467  }
   468  
   469  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   470  	return nil
   471  }
   472  
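        // emptyStorageRequestHandler answers storage range requests with an empty response.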
   473  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   474  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   475  	return nil
   476  }
   477  
   478  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   479  	return nil
   480  }
   481  
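        // proofHappyStorageRequestHandler always attaches boundary proofs, even for complete storage ranges.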
   482  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   483  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   484  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   485  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   486  		t.term()
   487  	}
   488  	return nil
   489  }
   490  
   491  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   492  //	var bytecodes [][]byte
   493  //	t.remote.OnByteCodes(t, id, bytecodes)
   494  //	return nil
   495  //}
   496  
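        // corruptCodeRequestHandler echoes the requested hashes back as if they were bytecodes, so the remote side is expected to reject the delivery.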
   497  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   498  	bytecodes := make([][]byte, 0, len(hashes))
   499  	for _, h := range hashes {
   500  		// Send back the hashes
   501  		bytecodes = append(bytecodes, h[:])
   502  	}
   503  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   504  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   505  		// Mimic the real-life handler, which drops a peer on errors
   506  		t.remote.Unregister(t.id)
   507  	}
   508  	return nil
   509  }
   510  
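        // cappedCodeRequestHandler returns only the bytecode for the first requested hash, forcing the syncer to re-request the remainder.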
   511  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   512  	bytecodes := make([][]byte, 0, 1)
   513  	for _, h := range hashes[:1] {
   514  		bytecodes = append(bytecodes, getCodeByHash(h))
   515  	}
   516  	// Missing bytecode can be retrieved again, no error expected
   517  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   518  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   519  		t.term()
   520  	}
   521  	return nil
   522  }
   523  
   524  // starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to a very small size
   525  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   526  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   527  }
   528  
   529  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   530  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   531  }
   532  
   533  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   534  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   535  //}
   536  
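        // corruptAccountRequestHandler builds a valid account range response but drops the first proof node, so the remote side is expected to reject the delivery.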
   537  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   538  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   539  	if len(proofs) > 0 {
   540  		proofs = proofs[1:]
   541  	}
   542  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   543  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   544  		// Mimic the real-life handler, which drops a peer on errors
   545  		t.remote.Unregister(t.id)
   546  	}
   547  	return nil
   548  }
   549  
   550  // corruptStorageRequestHandler doesn't provide good proofs
   551  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   552  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   553  	if len(proofs) > 0 {
   554  		proofs = proofs[1:]
   555  	}
   556  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   557  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   558  		// Mimic the real-life handler, which drops a peer on errors
   559  		t.remote.Unregister(t.id)
   560  	}
   561  	return nil
   562  }
   563  
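        // noProofStorageRequestHandler delivers storage ranges without any accompanying proofs.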
   564  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   565  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   566  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   567  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   568  		// Mimic the real-life handler, which drops a peer on errors
   569  		t.remote.Unregister(t.id)
   570  	}
   571  	return nil
   572  }
   573  
   574  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   575  // also ship the entire trie inside the proof. If the attack is successful,
   576  // the remote side does not do any follow-up requests
   577  func TestSyncBloatedProof(t *testing.T) {
   578  	t.Parallel()
   579  
   580  	var (
   581  		once   sync.Once
   582  		cancel = make(chan struct{})
   583  		term   = func() {
   584  			once.Do(func() {
   585  				close(cancel)
   586  			})
   587  		}
   588  	)
   589  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   590  	source := newTestPeer("source", t, term)
   591  	source.accountTrie = sourceAccountTrie
   592  	source.accountValues = elems
   593  
   594  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   595  		var (
   596  			proofs [][]byte
   597  			keys   []common.Hash
   598  			vals   [][]byte
   599  		)
   600  		// The values
   601  		for _, entry := range t.accountValues {
   602  			if bytes.Compare(entry.k, origin[:]) < 0 {
   603  				continue
   604  			}
   605  			if bytes.Compare(entry.k, limit[:]) > 0 {
   606  				continue
   607  			}
   608  			keys = append(keys, common.BytesToHash(entry.k))
   609  			vals = append(vals, entry.v)
   610  		}
   611  		// The proofs
   612  		proof := NewNodeSet()
   613  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   614  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   615  		}
   616  		// The bloat: add proof of every single element
   617  		for _, entry := range t.accountValues {
   618  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   619  				t.logger.Error("Could not prove item", "error", err)
   620  			}
   621  		}
   622  		// And remove one item from the elements
   623  		if len(keys) > 2 {
   624  			keys = append(keys[:1], keys[2:]...)
   625  			vals = append(vals[:1], vals[2:]...)
   626  		}
   627  		for _, blob := range proof.NodeList() {
   628  			proofs = append(proofs, blob)
   629  		}
   630  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   631  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   632  			t.term()
   633  			// This is actually correct, signal to exit the test successfully
   634  		}
   635  		return nil
   636  	}
   637  	syncer := setupSyncer(source)
   638  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   639  		t.Fatal("No error returned from incomplete/cancelled sync")
   640  	}
   641  }
   642  
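        // setupSyncer creates a Syncer backed by a fresh in-memory database and registers the given test peers with it.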
   643  func setupSyncer(peers ...*testPeer) *Syncer {
   644  	stateDb := database.NewMemoryDBManager()
   645  	syncer := NewSyncer(stateDb)
   646  	for _, peer := range peers {
   647  		syncer.Register(peer)
   648  		peer.remote = syncer
   649  	}
   650  	return syncer
   651  }
   652  
   653  // TestSync tests a basic sync with one peer
   654  func TestSync(t *testing.T) {
   655  	t.Parallel()
   656  
   657  	var (
   658  		once   sync.Once
   659  		cancel = make(chan struct{})
   660  		term   = func() {
   661  			once.Do(func() {
   662  				close(cancel)
   663  			})
   664  		}
   665  	)
   666  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   667  
   668  	mkSource := func(name string) *testPeer {
   669  		source := newTestPeer(name, t, term)
   670  		source.accountTrie = sourceAccountTrie
   671  		source.accountValues = elems
   672  		return source
   673  	}
   674  	syncer := setupSyncer(mkSource("source"))
   675  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   676  		t.Fatalf("sync failed: %v", err)
   677  	}
   678  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   679  }
   680  
   681  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   682  // panic within the prover
   683  func TestSyncTinyTriePanic(t *testing.T) {
   684  	t.Parallel()
   685  
   686  	var (
   687  		once   sync.Once
   688  		cancel = make(chan struct{})
   689  		term   = func() {
   690  			once.Do(func() {
   691  				close(cancel)
   692  			})
   693  		}
   694  	)
   695  	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   696  
   697  	mkSource := func(name string) *testPeer {
   698  		source := newTestPeer(name, t, term)
   699  		source.accountTrie = sourceAccountTrie
   700  		source.accountValues = elems
   701  		return source
   702  	}
   703  	syncer := setupSyncer(mkSource("source"))
   704  	done := checkStall(t, term)
   705  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   706  		t.Fatalf("sync failed: %v", err)
   707  	}
   708  	close(done)
   709  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   710  }
   711  
   712  // TestMultiSync tests a basic sync with multiple peers
   713  func TestMultiSync(t *testing.T) {
   714  	t.Parallel()
   715  
   716  	var (
   717  		once   sync.Once
   718  		cancel = make(chan struct{})
   719  		term   = func() {
   720  			once.Do(func() {
   721  				close(cancel)
   722  			})
   723  		}
   724  	)
   725  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   726  
   727  	mkSource := func(name string) *testPeer {
   728  		source := newTestPeer(name, t, term)
   729  		source.accountTrie = sourceAccountTrie
   730  		source.accountValues = elems
   731  		return source
   732  	}
   733  	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
   734  	done := checkStall(t, term)
   735  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   736  		t.Fatalf("sync failed: %v", err)
   737  	}
   738  	close(done)
   739  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   740  }
   741  
   742  // TestSyncWithStorage tests basic sync using accounts + storage + code
   743  func TestSyncWithStorage(t *testing.T) {
   744  	t.Parallel()
   745  
   746  	var (
   747  		once   sync.Once
   748  		cancel = make(chan struct{})
   749  		term   = func() {
   750  			once.Do(func() {
   751  				close(cancel)
   752  			})
   753  		}
   754  	)
   755  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
   756  
   757  	mkSource := func(name string) *testPeer {
   758  		source := newTestPeer(name, t, term)
   759  		source.accountTrie = sourceAccountTrie
   760  		source.accountValues = elems
   761  		source.storageTries = storageTries
   762  		source.storageValues = storageElems
   763  		return source
   764  	}
   765  	syncer := setupSyncer(mkSource("sourceA"))
   766  	done := checkStall(t, term)
   767  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   768  		t.Fatalf("sync failed: %v", err)
   769  	}
   770  	close(done)
   771  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   772  }
   773  
   774  // TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
   775  func TestMultiSyncManyUseless(t *testing.T) {
   776  	t.Parallel()
   777  
   778  	var (
   779  		once   sync.Once
   780  		cancel = make(chan struct{})
   781  		term   = func() {
   782  			once.Do(func() {
   783  				close(cancel)
   784  			})
   785  		}
   786  	)
   787  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   788  
   789  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   790  		source := newTestPeer(name, t, term)
   791  		source.accountTrie = sourceAccountTrie
   792  		source.accountValues = elems
   793  		source.storageTries = storageTries
   794  		source.storageValues = storageElems
   795  
   796  		if !noAccount {
   797  			source.accountRequestHandler = emptyRequestAccountRangeFn
   798  		}
   799  		if !noStorage {
   800  			source.storageRequestHandler = emptyStorageRequestHandler
   801  		}
   802  		if !noTrieNode {
   803  			source.trieRequestHandler = emptyTrieRequestHandler
   804  		}
   805  		return source
   806  	}
   807  
   808  	syncer := setupSyncer(
   809  		mkSource("full", true, true, true),
   810  		mkSource("noAccounts", false, true, true),
   811  		mkSource("noStorage", true, false, true),
   812  		mkSource("noTrie", true, true, false),
   813  	)
   814  	done := checkStall(t, term)
   815  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   816  		t.Fatalf("sync failed: %v", err)
   817  	}
   818  	close(done)
   819  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   820  }
   821  
   822  // TestMultiSyncManyUselessWithLowTimeout is like TestMultiSyncManyUseless, but with the response timeout set very low to also exercise the timeout path
   823  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   824  	var (
   825  		once   sync.Once
   826  		cancel = make(chan struct{})
   827  		term   = func() {
   828  			once.Do(func() {
   829  				close(cancel)
   830  			})
   831  		}
   832  	)
   833  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   834  
   835  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   836  		source := newTestPeer(name, t, term)
   837  		source.accountTrie = sourceAccountTrie
   838  		source.accountValues = elems
   839  		source.storageTries = storageTries
   840  		source.storageValues = storageElems
   841  
   842  		if !noAccount {
   843  			source.accountRequestHandler = emptyRequestAccountRangeFn
   844  		}
   845  		if !noStorage {
   846  			source.storageRequestHandler = emptyStorageRequestHandler
   847  		}
   848  		if !noTrieNode {
   849  			source.trieRequestHandler = emptyTrieRequestHandler
   850  		}
   851  		return source
   852  	}
   853  
   854  	syncer := setupSyncer(
   855  		mkSource("full", true, true, true),
   856  		mkSource("noAccounts", false, true, true),
   857  		mkSource("noStorage", true, false, true),
   858  		mkSource("noTrie", true, true, false),
   859  	)
   860  	// We're setting the timeout to very low, to increase the chance of the timeout
   861  	// being triggered. This was previously a cause of panic, when a response
   862  	// arrived simultaneously as a timeout was triggered.
   863  	syncer.rates.OverrideTTLLimit = time.Millisecond
   864  
   865  	done := checkStall(t, term)
   866  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   867  		t.Fatalf("sync failed: %v", err)
   868  	}
   869  	close(done)
   870  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   871  }
   872  
   873  // TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
   874  func TestMultiSyncManyUnresponsive(t *testing.T) {
   875  	var (
   876  		once   sync.Once
   877  		cancel = make(chan struct{})
   878  		term   = func() {
   879  			once.Do(func() {
   880  				close(cancel)
   881  			})
   882  		}
   883  	)
   884  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   885  
   886  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   887  		source := newTestPeer(name, t, term)
   888  		source.accountTrie = sourceAccountTrie
   889  		source.accountValues = elems
   890  		source.storageTries = storageTries
   891  		source.storageValues = storageElems
   892  
   893  		if !noAccount {
   894  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   895  		}
   896  		if !noStorage {
   897  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   898  		}
   899  		if !noTrieNode {
   900  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   901  		}
   902  		return source
   903  	}
   904  
   905  	syncer := setupSyncer(
   906  		mkSource("full", true, true, true),
   907  		mkSource("noAccounts", false, true, true),
   908  		mkSource("noStorage", true, false, true),
   909  		mkSource("noTrie", true, true, false),
   910  	)
   911  	// We're setting the timeout to very low, to make the test run a bit faster
   912  	syncer.rates.OverrideTTLLimit = time.Millisecond
   913  
   914  	done := checkStall(t, term)
   915  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   916  		t.Fatalf("sync failed: %v", err)
   917  	}
   918  	close(done)
   919  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   920  }
   921  
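        // checkStall aborts the sync via term if the test has not signalled completion within a minute; closing the returned channel disarms the watchdog.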
   922  func checkStall(t *testing.T, term func()) chan struct{} {
   923  	testDone := make(chan struct{})
   924  	go func() {
   925  		select {
   926  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   927  			t.Log("Sync stalled")
   928  			term()
   929  		case <-testDone:
   930  			return
   931  		}
   932  	}()
   933  	return testDone
   934  }
   935  
   936  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   937  // account trie has a few boundary elements.
   938  func TestSyncBoundaryAccountTrie(t *testing.T) {
   939  	t.Parallel()
   940  
   941  	var (
   942  		once   sync.Once
   943  		cancel = make(chan struct{})
   944  		term   = func() {
   945  			once.Do(func() {
   946  				close(cancel)
   947  			})
   948  		}
   949  	)
   950  	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
   951  
   952  	mkSource := func(name string) *testPeer {
   953  		source := newTestPeer(name, t, term)
   954  		source.accountTrie = sourceAccountTrie
   955  		source.accountValues = elems
   956  		return source
   957  	}
   958  	syncer := setupSyncer(
   959  		mkSource("peer-a"),
   960  		mkSource("peer-b"),
   961  	)
   962  	done := checkStall(t, term)
   963  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   964  		t.Fatalf("sync failed: %v", err)
   965  	}
   966  	close(done)
   967  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   968  }
   969  
   970  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   971  // consistently returning very small results
   972  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   973  	t.Parallel()
   974  
   975  	var (
   976  		once   sync.Once
   977  		cancel = make(chan struct{})
   978  		term   = func() {
   979  			once.Do(func() {
   980  				close(cancel)
   981  			})
   982  		}
   983  	)
   984  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   985  
   986  	mkSource := func(name string, slow bool) *testPeer {
   987  		source := newTestPeer(name, t, term)
   988  		source.accountTrie = sourceAccountTrie
   989  		source.accountValues = elems
   990  
   991  		if slow {
   992  			source.accountRequestHandler = starvingAccountRequestHandler
   993  		}
   994  		return source
   995  	}
   996  
   997  	syncer := setupSyncer(
   998  		mkSource("nice-a", false),
   999  		mkSource("nice-b", false),
  1000  		mkSource("nice-c", false),
  1001  		mkSource("capped", true),
  1002  	)
  1003  	done := checkStall(t, term)
  1004  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1005  		t.Fatalf("sync failed: %v", err)
  1006  	}
  1007  	close(done)
  1008  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1009  }
  1010  
  1011  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
  1012  // code requests properly.
  1013  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
  1014  	t.Parallel()
  1015  
  1016  	var (
  1017  		once   sync.Once
  1018  		cancel = make(chan struct{})
  1019  		term   = func() {
  1020  			once.Do(func() {
  1021  				close(cancel)
  1022  			})
  1023  		}
  1024  	)
  1025  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1026  
  1027  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1028  		source := newTestPeer(name, t, term)
  1029  		source.accountTrie = sourceAccountTrie
  1030  		source.accountValues = elems
  1031  		source.codeRequestHandler = codeFn
  1032  		return source
  1033  	}
  1034  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1035  	// chance that the full set of codes requested are sent only to the
  1036  	// non-corrupt peer, which delivers everything in one go, and makes the
  1037  	// test moot
  1038  	syncer := setupSyncer(
  1039  		mkSource("capped", cappedCodeRequestHandler),
  1040  		mkSource("corrupt", corruptCodeRequestHandler),
  1041  	)
  1042  	done := checkStall(t, term)
  1043  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1044  		t.Fatalf("sync failed: %v", err)
  1045  	}
  1046  	close(done)
  1047  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1048  }
  1049  
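        // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which delivers account range responses with a proof node stripped off.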
  1050  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1051  	t.Parallel()
  1052  
  1053  	var (
  1054  		once   sync.Once
  1055  		cancel = make(chan struct{})
  1056  		term   = func() {
  1057  			once.Do(func() {
  1058  				close(cancel)
  1059  			})
  1060  		}
  1061  	)
  1062  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1063  
  1064  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1065  		source := newTestPeer(name, t, term)
  1066  		source.accountTrie = sourceAccountTrie
  1067  		source.accountValues = elems
  1068  		source.accountRequestHandler = accFn
  1069  		return source
  1070  	}
  1071  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1072  	// chance that the full set of account requests is sent only to the
  1073  	// non-corrupt peer, which delivers everything in one go, and makes the
  1074  	// test moot
  1075  	syncer := setupSyncer(
  1076  		mkSource("capped", defaultAccountRequestHandler),
  1077  		mkSource("corrupt", corruptAccountRequestHandler),
  1078  	)
  1079  	done := checkStall(t, term)
  1080  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1081  		t.Fatalf("sync failed: %v", err)
  1082  	}
  1083  	close(done)
  1084  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1085  }
  1086  
  1087  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers the requested
  1088  // bytecodes one at a time
  1089  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1090  	t.Parallel()
  1091  
  1092  	var (
  1093  		once   sync.Once
  1094  		cancel = make(chan struct{})
  1095  		term   = func() {
  1096  			once.Do(func() {
  1097  				close(cancel)
  1098  			})
  1099  		}
  1100  	)
  1101  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1102  
  1103  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1104  		source := newTestPeer(name, t, term)
  1105  		source.accountTrie = sourceAccountTrie
  1106  		source.accountValues = elems
  1107  		source.codeRequestHandler = codeFn
  1108  		return source
  1109  	}
  1110  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1111  	// so it shouldn't be more than that
  1112  	var counter int
  1113  	syncer := setupSyncer(
  1114  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1115  			counter++
  1116  			return cappedCodeRequestHandler(t, id, hashes, max)
  1117  		}),
  1118  	)
  1119  	done := checkStall(t, term)
  1120  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1121  		t.Fatalf("sync failed: %v", err)
  1122  	}
  1123  	close(done)
  1124  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1125  	// deduplication is per request batch. If it were a perfect global dedup,
  1126  	// we would expect only 8 requests. If there were no dedup, there would be
  1127  	// 3k requests.
  1128  	// We expect somewhere below 100 requests for these 8 unique hashes.
  1129  	if threshold := 100; counter > threshold {
  1130  		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
  1131  	}
  1132  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1133  }
  1134  
  1135  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1136  // storage trie has a few boundary elements.
  1137  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1138  	t.Parallel()
  1139  
  1140  	var (
  1141  		once   sync.Once
  1142  		cancel = make(chan struct{})
  1143  		term   = func() {
  1144  			once.Do(func() {
  1145  				close(cancel)
  1146  			})
  1147  		}
  1148  	)
  1149  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
  1150  
  1151  	mkSource := func(name string) *testPeer {
  1152  		source := newTestPeer(name, t, term)
  1153  		source.accountTrie = sourceAccountTrie
  1154  		source.accountValues = elems
  1155  		source.storageTries = storageTries
  1156  		source.storageValues = storageElems
  1157  		return source
  1158  	}
  1159  	syncer := setupSyncer(
  1160  		mkSource("peer-a"),
  1161  		mkSource("peer-b"),
  1162  	)
  1163  	done := checkStall(t, term)
  1164  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1165  		t.Fatalf("sync failed: %v", err)
  1166  	}
  1167  	close(done)
  1168  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1169  }
  1170  
  1171  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1172  // consistently returning very small results
  1173  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1174  	t.Parallel()
  1175  
  1176  	var (
  1177  		once   sync.Once
  1178  		cancel = make(chan struct{})
  1179  		term   = func() {
  1180  			once.Do(func() {
  1181  				close(cancel)
  1182  			})
  1183  		}
  1184  	)
  1185  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
  1186  
  1187  	mkSource := func(name string, slow bool) *testPeer {
  1188  		source := newTestPeer(name, t, term)
  1189  		source.accountTrie = sourceAccountTrie
  1190  		source.accountValues = elems
  1191  		source.storageTries = storageTries
  1192  		source.storageValues = storageElems
  1193  
  1194  		if slow {
  1195  			source.storageRequestHandler = starvingStorageRequestHandler
  1196  		}
  1197  		return source
  1198  	}
  1199  
  1200  	syncer := setupSyncer(
  1201  		mkSource("nice-a", false),
  1202  		mkSource("slow", true),
  1203  	)
  1204  	done := checkStall(t, term)
  1205  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1206  		t.Fatalf("sync failed: %v", err)
  1207  	}
  1208  	close(done)
  1209  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1210  }
  1211  
  1212  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1213  // sometimes sending bad proofs
  1214  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1215  	t.Parallel()
  1216  
  1217  	var (
  1218  		once   sync.Once
  1219  		cancel = make(chan struct{})
  1220  		term   = func() {
  1221  			once.Do(func() {
  1222  				close(cancel)
  1223  			})
  1224  		}
  1225  	)
  1226  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1227  
  1228  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1229  		source := newTestPeer(name, t, term)
  1230  		source.accountTrie = sourceAccountTrie
  1231  		source.accountValues = elems
  1232  		source.storageTries = storageTries
  1233  		source.storageValues = storageElems
  1234  		source.storageRequestHandler = handler
  1235  		return source
  1236  	}
  1237  
  1238  	syncer := setupSyncer(
  1239  		mkSource("nice-a", defaultStorageRequestHandler),
  1240  		mkSource("nice-b", defaultStorageRequestHandler),
  1241  		mkSource("nice-c", defaultStorageRequestHandler),
  1242  		mkSource("corrupt", corruptStorageRequestHandler),
  1243  	)
  1244  	done := checkStall(t, term)
  1245  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1246  		t.Fatalf("sync failed: %v", err)
  1247  	}
  1248  	close(done)
  1249  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1250  }
  1251  
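        // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where one peer delivers storage ranges without any proofs.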
  1252  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1253  	t.Parallel()
  1254  
  1255  	var (
  1256  		once   sync.Once
  1257  		cancel = make(chan struct{})
  1258  		term   = func() {
  1259  			once.Do(func() {
  1260  				close(cancel)
  1261  			})
  1262  		}
  1263  	)
  1264  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1265  
  1266  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1267  		source := newTestPeer(name, t, term)
  1268  		source.accountTrie = sourceAccountTrie
  1269  		source.accountValues = elems
  1270  		source.storageTries = storageTries
  1271  		source.storageValues = storageElems
  1272  		source.storageRequestHandler = handler
  1273  		return source
  1274  	}
  1275  	syncer := setupSyncer(
  1276  		mkSource("nice-a", defaultStorageRequestHandler),
  1277  		mkSource("nice-b", defaultStorageRequestHandler),
  1278  		mkSource("nice-c", defaultStorageRequestHandler),
  1279  		mkSource("corrupt", noProofStorageRequestHandler),
  1280  	)
  1281  	done := checkStall(t, term)
  1282  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1283  		t.Fatalf("sync failed: %v", err)
  1284  	}
  1285  	close(done)
  1286  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1287  }
  1288  
  1289  // TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
  1290  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1291  // an error, where the recipient erroneously clipped the boundary nodes, but
  1292  // did not mark the account for healing.
  1293  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1294  	t.Parallel()
  1295  	var (
  1296  		once   sync.Once
  1297  		cancel = make(chan struct{})
  1298  		term   = func() {
  1299  			once.Do(func() {
  1300  				close(cancel)
  1301  			})
  1302  		}
  1303  	)
  1304  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
  1305  
  1306  	mkSource := func(name string) *testPeer {
  1307  		source := newTestPeer(name, t, term)
  1308  		source.accountTrie = sourceAccountTrie
  1309  		source.accountValues = elems
  1310  		source.storageTries = storageTries
  1311  		source.storageValues = storageElems
  1312  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1313  		return source
  1314  	}
  1315  	syncer := setupSyncer(mkSource("sourceA"))
  1316  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1317  		t.Fatalf("sync failed: %v", err)
  1318  	}
  1319  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1320  }
  1321  
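        // kv is a raw key/value pair as stored in the test tries.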
  1322  type kv struct {
  1323  	k, v []byte
  1324  }
  1325  
  1326  // Some helpers for sorting
  1327  type entrySlice []*kv
  1328  
  1329  func (p entrySlice) Len() int           { return len(p) }
  1330  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
  1331  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
  1332  
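        // key32 expands the given index into a deterministic 32-byte key.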
  1333  func key32(i uint64) []byte {
  1334  	key := make([]byte, 32)
  1335  	binary.LittleEndian.PutUint64(key, i)
  1336  	return key
  1337  }
  1338  
  1339  var codehashes = []common.Hash{
  1340  	crypto.Keccak256Hash([]byte{0}),
  1341  	crypto.Keccak256Hash([]byte{1}),
  1342  	crypto.Keccak256Hash([]byte{2}),
  1343  	crypto.Keccak256Hash([]byte{3}),
  1344  	crypto.Keccak256Hash([]byte{4}),
  1345  	crypto.Keccak256Hash([]byte{5}),
  1346  	crypto.Keccak256Hash([]byte{6}),
  1347  	crypto.Keccak256Hash([]byte{7}),
  1348  }
  1349  
  1350  // getCodeHash returns a pseudo-random code hash
  1351  func getCodeHash(i uint64) []byte {
  1352  	h := codehashes[int(i)%len(codehashes)]
  1353  	return common.CopyBytes(h[:])
  1354  }
  1355  
  1356  // getCodeByHash is a convenience function to look up the code from its code hash
  1357  func getCodeByHash(hash common.Hash) []byte {
  1358  	if hash == emptyCode {
  1359  		return nil
  1360  	}
  1361  	for i, h := range codehashes {
  1362  		if h == hash {
  1363  			return []byte{byte(i)}
  1364  		}
  1365  	}
  1366  	return nil
  1367  }
  1368  
  1369  // makeAccountTrieNoStorage spits out a trie, along with the leaves
  1370  func makeAccountTrieNoStorage(n int) (*statedb.Trie, entrySlice) {
  1371  	db := statedb.NewDatabase(database.NewMemoryDBManager())
  1372  	accTrie, _ := statedb.NewTrie(common.Hash{}, db, nil)
  1373  	var entries entrySlice
  1374  	for i := uint64(1); i <= uint64(n); i++ {
  1375  		acc, _ := genExternallyOwnedAccount(i, big.NewInt(int64(i)))
  1376  		serializer := account.NewAccountSerializerWithAccount(acc)
  1377  		value, _ := rlp.EncodeToBytes(serializer)
  1378  		key := key32(i)
  1379  		elem := &kv{key, value}
  1380  		accTrie.Update(elem.k, elem.v)
  1381  		entries = append(entries, elem)
  1382  	}
  1383  	sort.Sort(entries)
  1384  	accTrie.Commit(nil)
  1385  	return accTrie, entries
  1386  }
  1387  
  1388  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1389  // accounts normally, this function will fill a few accounts which have
  1390  // boundary hashes.
  1391  func makeBoundaryAccountTrie(n int) (*statedb.Trie, entrySlice) {
  1392  	var (
  1393  		entries    entrySlice
  1394  		boundaries []common.Hash
  1395  
  1396  		db      = statedb.NewDatabase(database.NewMemoryDBManager())
  1397  		trie, _ = statedb.NewTrie(common.Hash{}, db, nil)
  1398  	)
  1399  	// Initialize boundaries
  1400  	var next common.Hash
  1401  	step := new(big.Int).Sub(
  1402  		new(big.Int).Div(
  1403  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1404  			big.NewInt(int64(accountConcurrency)),
  1405  		), common.Big1,
  1406  	)
  1407  	for i := 0; i < accountConcurrency; i++ {
  1408  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1409  		if i == accountConcurrency-1 {
  1410  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1411  		}
  1412  		boundaries = append(boundaries, last)
  1413  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1414  	}
  1415  	// Fill boundary accounts
  1416  	for i := 0; i < len(boundaries); i++ {
  1417  		acc, _ := genSmartContractAccount(uint64(0), big.NewInt(int64(i)), emptyRoot, getCodeHash(uint64(i)))
  1418  		serializer := account.NewAccountSerializerWithAccount(acc)
  1419  		value, _ := rlp.EncodeToBytes(serializer)
  1420  		elem := &kv{boundaries[i].Bytes(), value}
  1421  		trie.Update(elem.k, elem.v)
  1422  		entries = append(entries, elem)
  1423  	}
  1424  	// Fill other accounts if required
  1425  	for i := uint64(1); i <= uint64(n); i++ {
  1426  		acc, _ := genSmartContractAccount(i, big.NewInt(int64(i)), emptyRoot, getCodeHash(i))
  1427  		serializer := account.NewAccountSerializerWithAccount(acc)
  1428  		value, _ := rlp.EncodeToBytes(serializer)
  1429  		elem := &kv{key32(i), value}
  1430  		trie.Update(elem.k, elem.v)
  1431  		entries = append(entries, elem)
  1432  	}
  1433  	sort.Sort(entries)
  1434  	trie.Commit(nil)
  1435  	return trie, entries
  1436  }
  1437  
  1438  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1439  // has a unique storage set.
  1440  func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*statedb.Trie, entrySlice, map[common.Hash]*statedb.Trie, map[common.Hash]entrySlice) {
  1441  	var (
  1442  		db             = statedb.NewDatabase(database.NewMemoryDBManager())
  1443  		accTrie, _     = statedb.NewTrie(common.Hash{}, db, nil)
  1444  		entries        entrySlice
  1445  		storageTries   = make(map[common.Hash]*statedb.Trie)
  1446  		storageEntries = make(map[common.Hash]entrySlice)
  1447  	)
  1448  	// Create n accounts in the trie
  1449  	for i := uint64(1); i <= uint64(accounts); i++ {
  1450  		key := key32(i)
  1451  		codehash := emptyCode[:]
  1452  		if code {
  1453  			codehash = getCodeHash(i)
  1454  		}
  1455  		// Create a storage trie
  1456  		stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
  1457  		stRoot := stTrie.Hash()
  1458  		stTrie.Commit(nil)
  1459  		acc, _ := genSmartContractAccount(i, big.NewInt(int64(i)), stRoot, codehash)
  1460  		serializer := account.NewAccountSerializerWithAccount(acc)
  1461  		value, _ := rlp.EncodeToBytes(serializer)
  1462  		elem := &kv{key, value}
  1463  		accTrie.Update(elem.k, elem.v)
  1464  		entries = append(entries, elem)
  1465  
  1466  		storageTries[common.BytesToHash(key)] = stTrie
  1467  		storageEntries[common.BytesToHash(key)] = stEntries
  1468  	}
  1469  	sort.Sort(entries)
  1470  
  1471  	accTrie.Commit(nil)
  1472  	return accTrie, entries, storageTries, storageEntries
  1473  }
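        // A hedged usage sketch (sizes are arbitrary; the storageTries and
        // storageValues peer fields are assumed to be the ones the storage sync
        // tests earlier in this file populate; term and cancel are wired as in
        // TestSyncAccountPerformance below):
        //
        //	sourceAccountTrie, elems, storageTries, storageElems :=
        //		makeAccountTrieWithStorageWithUniqueStorage(10, 5, false)
        //	source := newTestPeer("source", t, term)
        //	source.accountTrie = sourceAccountTrie
        //	source.accountValues = elems
        //	source.storageTries = storageTries
        //	source.storageValues = storageElems
        //	syncer := setupSyncer(source)
        //	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
        //		t.Fatalf("sync failed: %v", err)
        //	}
        //	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)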
  1474  
  1475  // makeAccountTrieWithStorage spits out an account trie and its sorted leaf entries, plus the storage trie and entries shared by all of the accounts
  1476  func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*statedb.Trie, entrySlice, map[common.Hash]*statedb.Trie, map[common.Hash]entrySlice) {
  1477  	var (
  1478  		db             = statedb.NewDatabase(database.NewMemoryDBManager())
  1479  		accTrie, _     = statedb.NewTrie(common.Hash{}, db, nil)
  1480  		entries        entrySlice
  1481  		storageTries   = make(map[common.Hash]*statedb.Trie)
  1482  		storageEntries = make(map[common.Hash]entrySlice)
  1483  	)
  1484  	// Make a storage trie which we reuse for the whole lot
  1485  	var (
  1486  		stTrie    *statedb.Trie
  1487  		stEntries entrySlice
  1488  	)
  1489  	if boundary {
  1490  		stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
  1491  	} else {
  1492  		stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
  1493  	}
  1494  	stRoot := stTrie.Hash()
  1495  
  1496  	// Create n accounts in the trie
  1497  	for i := uint64(1); i <= uint64(accounts); i++ {
  1498  		key := key32(i)
  1499  		codehash := emptyCode[:]
  1500  		if code {
  1501  			codehash = getCodeHash(i)
  1502  		}
  1503  		acc, _ := genSmartContractAccount(i, big.NewInt(int64(i)), stRoot, codehash)
  1504  		serializer := account.NewAccountSerializerWithAccount(acc)
  1505  		value, _ := rlp.EncodeToBytes(serializer)
  1506  		elem := &kv{key, value}
  1507  		accTrie.Update(elem.k, elem.v)
  1508  		entries = append(entries, elem)
  1509  		// we reuse the same one for all accounts
  1510  		storageTries[common.BytesToHash(key)] = stTrie
  1511  		storageEntries[common.BytesToHash(key)] = stEntries
  1512  	}
  1513  	sort.Sort(entries)
  1514  	stTrie.Commit(nil)
  1515  	accTrie.Commit(nil)
  1516  	return accTrie, entries, storageTries, storageEntries
  1517  }
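        // Unlike makeAccountTrieWithStorageWithUniqueStorage above, every account
        // here reports the same storage root, so storageTries and storageEntries
        // map every account key to the one shared trie and slice. This exercises
        // the case where many accounts point at identical storage.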
  1518  
  1519  // makeStorageTrieWithSeed fills a storage trie with n items and commits it,
  1520  // returning the trie and the sorted entries. The seed can be used to ensure
  1521  // that the resulting tries are unique.
  1522  func makeStorageTrieWithSeed(n, seed uint64, db *statedb.Database) (*statedb.Trie, entrySlice) {
  1523  	trie, _ := statedb.NewTrie(common.Hash{}, db, nil)
  1524  	var entries entrySlice
  1525  	for i := uint64(1); i <= n; i++ {
  1526  		// store 'i + seed' at slot 'i'
  1527  		slotValue := key32(i + seed)
  1528  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1529  
  1530  		slotKey := key32(i)
  1531  		key := crypto.Keccak256Hash(slotKey[:])
  1532  
  1533  		elem := &kv{key[:], rlpSlotValue}
  1534  		trie.Update(elem.k, elem.v)
  1535  		entries = append(entries, elem)
  1536  	}
  1537  	sort.Sort(entries)
  1538  	trie.Commit(nil)
  1539  	return trie, entries
  1540  }
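        // Concretely: slot 'i' lives under keccak256(key32(i)) and holds the RLP
        // encoding of key32(i + seed) with leading zeroes trimmed, so tries built
        // with different seeds share their keys but differ in every value, and
        // therefore in their roots.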
  1541  
  1542  // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
  1543  // storage slots normally, this function fills a few slots whose keys sit on
  1544  // the boundary hashes of the sync chunks.
  1545  func makeBoundaryStorageTrie(n int, db *statedb.Database) (*statedb.Trie, entrySlice) {
  1546  	var (
  1547  		entries    entrySlice
  1548  		boundaries []common.Hash
  1549  		trie, _    = statedb.NewTrie(common.Hash{}, db, nil)
  1550  	)
  1551  	// Initialize boundaries
  1552  	var next common.Hash
  1553  	step := new(big.Int).Sub(
  1554  		new(big.Int).Div(
  1555  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1556  			big.NewInt(int64(accountConcurrency)),
  1557  		), common.Big1,
  1558  	)
  1559  	for i := 0; i < accountConcurrency; i++ {
  1560  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1561  		if i == accountConcurrency-1 {
  1562  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1563  		}
  1564  		boundaries = append(boundaries, last)
  1565  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1566  	}
  1567  	// Fill boundary slots
  1568  	for i := 0; i < len(boundaries); i++ {
  1569  		key := boundaries[i]
  1570  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1571  
  1572  		elem := &kv{key[:], val}
  1573  		trie.Update(elem.k, elem.v)
  1574  		entries = append(entries, elem)
  1575  	}
  1576  	// Fill other slots if required
  1577  	for i := uint64(1); i <= uint64(n); i++ {
  1578  		slotKey := key32(i)
  1579  		key := crypto.Keccak256Hash(slotKey[:])
  1580  
  1581  		slotValue := key32(i)
  1582  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1583  
  1584  		elem := &kv{key[:], rlpSlotValue}
  1585  		trie.Update(elem.k, elem.v)
  1586  		entries = append(entries, elem)
  1587  	}
  1588  	sort.Sort(entries)
  1589  	trie.Commit(nil)
  1590  	return trie, entries
  1591  }
  1592  
  1593  func verifyTrie(db database.DBManager, root common.Hash, t *testing.T) {
  1594  	t.Helper()
  1595  	triedb := statedb.NewDatabase(db)
  1596  	accTrie, err := statedb.NewTrie(root, triedb, nil)
  1597  	if err != nil {
  1598  		t.Fatal(err)
  1599  	}
  1600  	accounts, slots := 0, 0
  1601  	accIt := statedb.NewIterator(accTrie.NodeIterator(nil))
  1602  	for accIt.Next() {
  1603  		serializer := account.NewAccountSerializer()
  1604  		if err := rlp.DecodeBytes(accIt.Value, serializer); err != nil {
  1605  			t.Fatalf("invalid account encountered during trie verification: %v", err)
  1606  		}
  1607  		acc := serializer.GetAccount()
  1608  		pacc := account.GetProgramAccount(acc)
  1609  		accounts++
  1610  		if pacc != nil && pacc.GetStorageRoot().Unextend() != emptyRoot {
  1611  			storeTrie, err := statedb.NewSecureStorageTrie(pacc.GetStorageRoot(), triedb, nil)
  1612  			if err != nil {
  1613  				t.Fatal(err)
  1614  			}
  1615  			storeIt := statedb.NewIterator(storeTrie.NodeIterator(nil))
  1616  			for storeIt.Next() {
  1617  				slots++
  1618  			}
  1619  			if err := storeIt.Err; err != nil {
  1620  				t.Fatal(err)
  1621  			}
  1622  		}
  1623  	}
  1624  	if err := accIt.Err; err != nil {
  1625  		t.Fatal(err)
  1626  	}
  1627  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1628  }
  1629  
  1630  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1631  // minimizing state healing.
  1632  func TestSyncAccountPerformance(t *testing.T) {
  1633  	// Set the account concurrency to 1. This _should_ result in the
  1634  	// range root becoming correct, and no healing should be needed.
  1635  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1636  	accountConcurrency = 1
  1637  
  1638  	var (
  1639  		once   sync.Once
  1640  		cancel = make(chan struct{})
  1641  		term   = func() {
  1642  			once.Do(func() {
  1643  				close(cancel)
  1644  			})
  1645  		}
  1646  	)
  1647  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
  1648  
  1649  	mkSource := func(name string) *testPeer {
  1650  		source := newTestPeer(name, t, term)
  1651  		source.accountTrie = sourceAccountTrie
  1652  		source.accountValues = elems
  1653  		return source
  1654  	}
  1655  	src := mkSource("source")
  1656  	syncer := setupSyncer(src)
  1657  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1658  		t.Fatalf("sync failed: %v", err)
  1659  	}
  1660  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1661  	// The trie root will always be requested, since it is added when the snap
  1662  	// sync cycle starts. When popping the queue, we do not look it up again.
  1663  	// Doing so would bring this number down to zero in this artificial testcase,
  1664  	// but only add extra IO for no reason in practice.
  1665  	if have, want := src.nTrienodeRequests, 1; have != want {
  1666  		fmt.Print(src.Stats())
  1667  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1668  	}
  1669  }
  1670  
  1671  func TestSlotEstimation(t *testing.T) {
  1672  	for i, tc := range []struct {
  1673  		last  common.Hash
  1674  		count int
  1675  		want  uint64
  1676  	}{
  1677  		{
  1678  			// Half the space
  1679  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1680  			100,
  1681  			100,
  1682  		},
  1683  		{
  1684  			// 1 / 16th
  1685  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1686  			100,
  1687  			1500,
  1688  		},
  1689  		{
  1690  			// A bit more than 1 / 16th
  1691  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1692  			100,
  1693  			1499,
  1694  		},
  1695  		{
  1696  			// Almost everything
  1697  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1698  			100,
  1699  			6,
  1700  		},
  1701  		{
  1702  			// Almost nothing -- should lead to error
  1703  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1704  			1,
  1705  			0,
  1706  		},
  1707  		{
  1708  			// Nothing -- should lead to error
  1709  			common.Hash{},
  1710  			100,
  1711  			0,
  1712  		},
  1713  	} {
  1714  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1715  		if want := tc.want; have != want {
  1716  			t.Errorf("test %d: have %d want %d", i, have, want)
  1717  		}
  1718  	}
  1719  }
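        // estimateRemainingSlots itself is implemented elsewhere in this package; the
        // sketch below only exists to make the expectations in the table above easy to
        // verify by hand. Assuming a linear extrapolation over the 256-bit key space,
        // the estimate is count * (2^256 - 1 - last) / last, with an error returned for
        // a zero 'last' or an estimate that does not fit into a uint64. This is an
        // illustrative approximation, not the package's actual implementation.
        func estimateRemainingSlotsSketch(count int, last common.Hash) (uint64, error) {
        	lastBig := last.Big()
        	if lastBig.Sign() == 0 {
        		return 0, fmt.Errorf("last hash is zero")
        	}
        	// Uncovered part of the key space: 2^256 - 1 - last
        	remaining := new(big.Int).Exp(common.Big2, common.Big256, nil)
        	remaining.Sub(remaining, common.Big1)
        	remaining.Sub(remaining, lastBig)
        	// Scale the slots seen so far by the uncovered/covered ratio
        	estimate := new(big.Int).Mul(big.NewInt(int64(count)), remaining)
        	estimate.Div(estimate, lastBig)
        	if !estimate.IsUint64() {
        		return 0, fmt.Errorf("estimate out of range")
        	}
        	return estimate.Uint64(), nil
        }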