github.com/ethw3/go-ethereuma@v0.0.0-20221013053120-c14602a4c23c/eth/protocols/snap/sync_test.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sort"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/ethw3/go-ethereuma/common"
    31  	"github.com/ethw3/go-ethereuma/core/rawdb"
    32  	"github.com/ethw3/go-ethereuma/core/types"
    33  	"github.com/ethw3/go-ethereuma/crypto"
    34  	"github.com/ethw3/go-ethereuma/ethdb"
    35  	"github.com/ethw3/go-ethereuma/light"
    36  	"github.com/ethw3/go-ethereuma/log"
    37  	"github.com/ethw3/go-ethereuma/rlp"
    38  	"github.com/ethw3/go-ethereuma/trie"
    39  	"golang.org/x/crypto/sha3"
    40  )
    41  
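        // TestHashing cross-checks the two hashing code paths used in this package:
        // reading the digest through crypto.KeccakState.Read must yield the same
        // bytes as the allocation-heavier Sum(nil) on a plain Keccak256 hasher.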
    42  func TestHashing(t *testing.T) {
    43  	t.Parallel()
    44  
    45  	var bytecodes = make([][]byte, 10)
    46  	for i := 0; i < len(bytecodes); i++ {
    47  		buf := make([]byte, 100)
    48  		rand.Read(buf)
    49  		bytecodes[i] = buf
    50  	}
    51  	var want, got string
    52  	var old = func() {
    53  		hasher := sha3.NewLegacyKeccak256()
    54  		for i := 0; i < len(bytecodes); i++ {
    55  			hasher.Reset()
    56  			hasher.Write(bytecodes[i])
    57  			hash := hasher.Sum(nil)
    58  			got = fmt.Sprintf("%v\n%v", got, hash)
    59  		}
    60  	}
    61  	var new = func() {
    62  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    63  		var hash = make([]byte, 32)
    64  		for i := 0; i < len(bytecodes); i++ {
    65  			hasher.Reset()
    66  			hasher.Write(bytecodes[i])
    67  			hasher.Read(hash)
    68  			want = fmt.Sprintf("%v\n%v", want, hash)
    69  		}
    70  	}
    71  	old()
    72  	new()
    73  	if want != got {
    74  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    75  	}
    76  }
    77  
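        // BenchmarkHashing measures the same two code paths as TestHashing, comparing
        // the allocations and speed of Sum(nil) against reusing a buffer via
        // crypto.KeccakState.Read.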
    78  func BenchmarkHashing(b *testing.B) {
    79  	var bytecodes = make([][]byte, 10000)
    80  	for i := 0; i < len(bytecodes); i++ {
    81  		buf := make([]byte, 100)
    82  		rand.Read(buf)
    83  		bytecodes[i] = buf
    84  	}
    85  	var old = func() {
    86  		hasher := sha3.NewLegacyKeccak256()
    87  		for i := 0; i < len(bytecodes); i++ {
    88  			hasher.Reset()
    89  			hasher.Write(bytecodes[i])
    90  			hasher.Sum(nil)
    91  		}
    92  	}
    93  	var new = func() {
    94  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    95  		var hash = make([]byte, 32)
    96  		for i := 0; i < len(bytecodes); i++ {
    97  			hasher.Reset()
    98  			hasher.Write(bytecodes[i])
    99  			hasher.Read(hash)
   100  		}
   101  	}
   102  	b.Run("old", func(b *testing.B) {
   103  		b.ReportAllocs()
   104  		for i := 0; i < b.N; i++ {
   105  			old()
   106  		}
   107  	})
   108  	b.Run("new", func(b *testing.B) {
   109  		b.ReportAllocs()
   110  		for i := 0; i < b.N; i++ {
   111  			new()
   112  		}
   113  	})
   114  }
   115  
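        // The handler function types below let individual tests override how a testPeer
        // answers each kind of snap request (account ranges, storage ranges, trie nodes
        // and bytecodes) without re-implementing the whole peer.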
   116  type (
   117  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   118  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   119  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   120  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   121  )
   122  
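        // testPeer is an in-memory peer implementation serving snap requests straight
        // from locally built tries. It satisfies the peer interface expected by
        // Syncer.Register, so tests can wire up any number of (mis)behaving sources.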
   123  type testPeer struct {
   124  	id            string
   125  	test          *testing.T
   126  	remote        *Syncer
   127  	logger        log.Logger
   128  	accountTrie   *trie.Trie
   129  	accountValues entrySlice
   130  	storageTries  map[common.Hash]*trie.Trie
   131  	storageValues map[common.Hash]entrySlice
   132  
   133  	accountRequestHandler accountHandlerFunc
   134  	storageRequestHandler storageHandlerFunc
   135  	trieRequestHandler    trieHandlerFunc
   136  	codeRequestHandler    codeHandlerFunc
   137  	term                  func()
   138  
   139  	// counters
   140  	nAccountRequests  int
   141  	nStorageRequests  int
   142  	nBytecodeRequests int
   143  	nTrienodeRequests int
   144  }
   145  
   146  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   147  	peer := &testPeer{
   148  		id:                    id,
   149  		test:                  t,
   150  		logger:                log.New("id", id),
   151  		accountRequestHandler: defaultAccountRequestHandler,
   152  		trieRequestHandler:    defaultTrieRequestHandler,
   153  		storageRequestHandler: defaultStorageRequestHandler,
   154  		codeRequestHandler:    defaultCodeRequestHandler,
   155  		term:                  term,
   156  	}
   157  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   158  	//peer.logger.SetHandler(stderrHandler)
   159  	return peer
   160  }
   161  
   162  func (t *testPeer) ID() string      { return t.id }
   163  func (t *testPeer) Log() log.Logger { return t.logger }
   164  
   165  func (t *testPeer) Stats() string {
   166  	return fmt.Sprintf(`Account requests: %d
   167  Storage requests: %d
   168  Bytecode requests: %d
   169  Trienode requests: %d
   170  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   171  }
   172  
   173  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   174  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   175  	t.nAccountRequests++
   176  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   177  	return nil
   178  }
   179  
   180  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   181  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   182  	t.nTrienodeRequests++
   183  	go t.trieRequestHandler(t, id, root, paths, bytes)
   184  	return nil
   185  }
   186  
   187  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   188  	t.nStorageRequests++
   189  	if len(accounts) == 1 && origin != nil {
   190  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   191  	} else {
   192  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   193  	}
   194  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   195  	return nil
   196  }
   197  
   198  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   199  	t.nBytecodeRequests++
   200  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   201  	go t.codeRequestHandler(t, id, hashes, bytes)
   202  	return nil
   203  }
   204  
   205  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   206  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   207  	// Pass the response
   208  	var nodes [][]byte
   209  	for _, pathset := range paths {
   210  		switch len(pathset) {
   211  		case 1:
   212  			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
   213  			if err != nil {
   214  				t.logger.Info("Error handling req", "error", err)
   215  				break
   216  			}
   217  			nodes = append(nodes, blob)
   218  		default:
   219  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   220  			for _, path := range pathset[1:] {
   221  				blob, _, err := account.TryGetNode(path)
   222  				if err != nil {
   223  					t.logger.Info("Error handling req", "error", err)
   224  					break
   225  				}
   226  				nodes = append(nodes, blob)
   227  			}
   228  		}
   229  	}
   230  	t.remote.OnTrieNodes(t, requestId, nodes)
   231  	return nil
   232  }
   233  
   234  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   235  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   236  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   237  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   238  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   239  		t.term()
   240  		return err
   241  	}
   242  	return nil
   243  }
   244  
   245  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   246  	var size uint64
   247  	if limit == (common.Hash{}) {
   248  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   249  	}
   250  	for _, entry := range t.accountValues {
   251  		if size > cap {
   252  			break
   253  		}
   254  		if bytes.Compare(origin[:], entry.k) <= 0 {
   255  			keys = append(keys, common.BytesToHash(entry.k))
   256  			vals = append(vals, entry.v)
   257  			size += uint64(32 + len(entry.v))
   258  		}
   259  		// If we've exceeded the request threshold, abort
   260  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   261  			break
   262  		}
   263  	}
   264  	// Unless we send the entire trie, we need to supply proofs
   265  	// Actually, we need to supply proofs either way! This seems to be an implementation
   266  	// quirk in go-ethereum
   267  	proof := light.NewNodeSet()
   268  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   269  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   270  	}
   271  	if len(keys) > 0 {
   272  		lastK := (keys[len(keys)-1])[:]
   273  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   274  			t.logger.Error("Could not prove last item", "error", err)
   275  		}
   276  	}
   277  	for _, blob := range proof.NodeList() {
   278  		proofs = append(proofs, blob)
   279  	}
   280  	return keys, vals, proofs
   281  }
   282  
   283  // defaultStorageRequestHandler is a well-behaving storage request handler
   284  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   285  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   286  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   287  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   288  		t.term()
   289  	}
   290  	return nil
   291  }
   292  
   293  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   294  	var bytecodes [][]byte
   295  	for _, h := range hashes {
   296  		bytecodes = append(bytecodes, getCodeByHash(h))
   297  	}
   298  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   299  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   300  		t.term()
   301  	}
   302  	return nil
   303  }
   304  
   305  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   306  	var size uint64
   307  	for _, account := range accounts {
   308  		// The first account might start from a different origin and end sooner
   309  		var originHash common.Hash
   310  		if len(origin) > 0 {
   311  			originHash = common.BytesToHash(origin)
   312  		}
   313  		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   314  		if len(limit) > 0 {
   315  			limitHash = common.BytesToHash(limit)
   316  		}
   317  		var (
   318  			keys  []common.Hash
   319  			vals  [][]byte
   320  			abort bool
   321  		)
   322  		for _, entry := range t.storageValues[account] {
   323  			if size >= max {
   324  				abort = true
   325  				break
   326  			}
   327  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   328  				continue
   329  			}
   330  			keys = append(keys, common.BytesToHash(entry.k))
   331  			vals = append(vals, entry.v)
   332  			size += uint64(32 + len(entry.v))
   333  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   334  				break
   335  			}
   336  		}
   337  		if len(keys) > 0 {
   338  			hashes = append(hashes, keys)
   339  			slots = append(slots, vals)
   340  		}
   341  		// Generate the Merkle proofs for the first and last storage slot, but
   342  		// only if the response was capped. If the entire storage trie is included
   343  		// in the response, no proofs are needed.
   344  		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
   345  			// If we're aborting, we need to prove the first and last item
   346  			// This terminates the response (and thus the loop)
   347  			proof := light.NewNodeSet()
   348  			stTrie := t.storageTries[account]
   349  
   350  			// Here's a potential gotcha: when constructing the proof, we cannot
   351  			// use the 'origin' slice directly, but must use the full 32-byte
   352  			// hash form.
   353  			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
   354  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   355  			}
   356  			if len(keys) > 0 {
   357  				lastK := (keys[len(keys)-1])[:]
   358  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   359  					t.logger.Error("Could not prove last item", "error", err)
   360  				}
   361  			}
   362  			for _, blob := range proof.NodeList() {
   363  				proofs = append(proofs, blob)
   364  			}
   365  			break
   366  		}
   367  	}
   368  	return hashes, slots, proofs
   369  }
   370  
   371  // createStorageRequestResponseAlwaysProve tests a corner case where the handler always
   372  // supplies the proof for the last account, even if it is 'complete'.
   373  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   374  	var size uint64
   375  	max = max * 3 / 4
   376  
   377  	var origin common.Hash
   378  	if len(bOrigin) > 0 {
   379  		origin = common.BytesToHash(bOrigin)
   380  	}
   381  	var exit bool
   382  	for i, account := range accounts {
   383  		var keys []common.Hash
   384  		var vals [][]byte
   385  		for _, entry := range t.storageValues[account] {
   386  			if bytes.Compare(entry.k, origin[:]) < 0 {
   387  				exit = true
   388  			}
   389  			keys = append(keys, common.BytesToHash(entry.k))
   390  			vals = append(vals, entry.v)
   391  			size += uint64(32 + len(entry.v))
   392  			if size > max {
   393  				exit = true
   394  			}
   395  		}
   396  		if i == len(accounts)-1 {
   397  			exit = true
   398  		}
   399  		hashes = append(hashes, keys)
   400  		slots = append(slots, vals)
   401  
   402  		if exit {
   403  			// If we're aborting, we need to prove the first and last item
   404  			// This terminates the response (and thus the loop)
   405  			proof := light.NewNodeSet()
   406  			stTrie := t.storageTries[account]
   407  
   408  			// Here's a potential gotcha: when constructing the proof, we cannot
   409  			// use the 'origin' slice directly, but must use the full 32-byte
   410  			// hash form.
   411  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   412  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   413  					"error", err)
   414  			}
   415  			if len(keys) > 0 {
   416  				lastK := (keys[len(keys)-1])[:]
   417  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   418  					t.logger.Error("Could not prove last item", "error", err)
   419  				}
   420  			}
   421  			for _, blob := range proof.NodeList() {
   422  				proofs = append(proofs, blob)
   423  			}
   424  			break
   425  		}
   426  	}
   427  	return hashes, slots, proofs
   428  }
   429  
   430  // emptyRequestAccountRangeFn is a handler that rejects AccountRangeRequests by replying with an empty response
   431  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   432  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   433  	return nil
   434  }
   435  
   436  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   437  	return nil
   438  }
   439  
   440  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   441  	t.remote.OnTrieNodes(t, requestId, nil)
   442  	return nil
   443  }
   444  
   445  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   446  	return nil
   447  }
   448  
   449  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   450  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   451  	return nil
   452  }
   453  
   454  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   455  	return nil
   456  }
   457  
   458  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   459  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   460  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   461  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   462  		t.term()
   463  	}
   464  	return nil
   465  }
   466  
   467  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   468  //	var bytecodes [][]byte
   469  //	t.remote.OnByteCodes(t, id, bytecodes)
   470  //	return nil
   471  //}
   472  
   473  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   474  	var bytecodes [][]byte
   475  	for _, h := range hashes {
   476  		// Send back the hashes
   477  		bytecodes = append(bytecodes, h[:])
   478  	}
   479  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   480  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   481  		// Mimic the real-life handler, which drops a peer on errors
   482  		t.remote.Unregister(t.id)
   483  	}
   484  	return nil
   485  }
   486  
   487  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   488  	var bytecodes [][]byte
   489  	for _, h := range hashes[:1] {
   490  		bytecodes = append(bytecodes, getCodeByHash(h))
   491  	}
   492  	// Missing bytecode can be retrieved again, no error expected
   493  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   494  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   495  		t.term()
   496  	}
   497  	return nil
   498  }
   499  
   500  // starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to be very small
   501  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   502  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   503  }
   504  
   505  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   506  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   507  }
   508  
   509  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   510  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   511  //}
   512  
   513  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   514  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   515  	if len(proofs) > 0 {
   516  		proofs = proofs[1:]
   517  	}
   518  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   519  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   520  		// Mimic the real-life handler, which drops a peer on errors
   521  		t.remote.Unregister(t.id)
   522  	}
   523  	return nil
   524  }
   525  
   526  // corruptStorageRequestHandler doesn't provide good proofs
   527  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   528  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   529  	if len(proofs) > 0 {
   530  		proofs = proofs[1:]
   531  	}
   532  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   533  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   534  		// Mimic the real-life handler, which drops a peer on errors
   535  		t.remote.Unregister(t.id)
   536  	}
   537  	return nil
   538  }
   539  
   540  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   541  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   542  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   543  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   544  		// Mimic the real-life handler, which drops a peer on errors
   545  		t.remote.Unregister(t.id)
   546  	}
   547  	return nil
   548  }
   549  
   550  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   551  // also ship the entire trie inside the proof. If the attack is successful,
   552  // the remote side does not do any follow-up requests
   553  func TestSyncBloatedProof(t *testing.T) {
   554  	t.Parallel()
   555  
   556  	var (
   557  		once   sync.Once
   558  		cancel = make(chan struct{})
   559  		term   = func() {
   560  			once.Do(func() {
   561  				close(cancel)
   562  			})
   563  		}
   564  	)
   565  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   566  	source := newTestPeer("source", t, term)
   567  	source.accountTrie = sourceAccountTrie
   568  	source.accountValues = elems
   569  
   570  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   571  		var (
   572  			proofs [][]byte
   573  			keys   []common.Hash
   574  			vals   [][]byte
   575  		)
   576  		// The values
   577  		for _, entry := range t.accountValues {
   578  			if bytes.Compare(entry.k, origin[:]) < 0 {
   579  				continue
   580  			}
   581  			if bytes.Compare(entry.k, limit[:]) > 0 {
   582  				continue
   583  			}
   584  			keys = append(keys, common.BytesToHash(entry.k))
   585  			vals = append(vals, entry.v)
   586  		}
   587  		// The proofs
   588  		proof := light.NewNodeSet()
   589  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   590  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   591  		}
   592  		// The bloat: add proof of every single element
   593  		for _, entry := range t.accountValues {
   594  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   595  				t.logger.Error("Could not prove item", "error", err)
   596  			}
   597  		}
   598  		// And remove one item from the elements
   599  		if len(keys) > 2 {
   600  			keys = append(keys[:1], keys[2:]...)
   601  			vals = append(vals[:1], vals[2:]...)
   602  		}
   603  		for _, blob := range proof.NodeList() {
   604  			proofs = append(proofs, blob)
   605  		}
   606  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   607  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   608  			t.term()
   609  			// This is actually correct, signal to exit the test successfully
   610  		}
   611  		return nil
   612  	}
   613  	syncer := setupSyncer(source)
   614  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   615  		t.Fatal("No error returned from incomplete/cancelled sync")
   616  	}
   617  }
   618  
   619  func setupSyncer(peers ...*testPeer) *Syncer {
   620  	stateDb := rawdb.NewMemoryDatabase()
   621  	syncer := NewSyncer(stateDb)
   622  	for _, peer := range peers {
   623  		syncer.Register(peer)
   624  		peer.remote = syncer
   625  	}
   626  	return syncer
   627  }
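
        // A typical test below wires these helpers roughly as follows (an illustrative
        // sketch only; the real tests add stall detection and per-peer handler overrides):
        //
        //	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
        //	source := newTestPeer("source", t, term)
        //	source.accountTrie = sourceAccountTrie
        //	source.accountValues = elems
        //	syncer := setupSyncer(source)
        //	err := syncer.Sync(sourceAccountTrie.Hash(), cancel)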
   628  
   629  // TestSync tests a basic sync with one peer
   630  func TestSync(t *testing.T) {
   631  	t.Parallel()
   632  
   633  	var (
   634  		once   sync.Once
   635  		cancel = make(chan struct{})
   636  		term   = func() {
   637  			once.Do(func() {
   638  				close(cancel)
   639  			})
   640  		}
   641  	)
   642  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   643  
   644  	mkSource := func(name string) *testPeer {
   645  		source := newTestPeer(name, t, term)
   646  		source.accountTrie = sourceAccountTrie
   647  		source.accountValues = elems
   648  		return source
   649  	}
   650  	syncer := setupSyncer(mkSource("source"))
   651  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   652  		t.Fatalf("sync failed: %v", err)
   653  	}
   654  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   655  }
   656  
   657  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   658  // panic within the prover
   659  func TestSyncTinyTriePanic(t *testing.T) {
   660  	t.Parallel()
   661  
   662  	var (
   663  		once   sync.Once
   664  		cancel = make(chan struct{})
   665  		term   = func() {
   666  			once.Do(func() {
   667  				close(cancel)
   668  			})
   669  		}
   670  	)
   671  	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   672  
   673  	mkSource := func(name string) *testPeer {
   674  		source := newTestPeer(name, t, term)
   675  		source.accountTrie = sourceAccountTrie
   676  		source.accountValues = elems
   677  		return source
   678  	}
   679  	syncer := setupSyncer(mkSource("source"))
   680  	done := checkStall(t, term)
   681  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   682  		t.Fatalf("sync failed: %v", err)
   683  	}
   684  	close(done)
   685  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   686  }
   687  
   688  // TestMultiSync tests a basic sync with multiple peers
   689  func TestMultiSync(t *testing.T) {
   690  	t.Parallel()
   691  
   692  	var (
   693  		once   sync.Once
   694  		cancel = make(chan struct{})
   695  		term   = func() {
   696  			once.Do(func() {
   697  				close(cancel)
   698  			})
   699  		}
   700  	)
   701  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   702  
   703  	mkSource := func(name string) *testPeer {
   704  		source := newTestPeer(name, t, term)
   705  		source.accountTrie = sourceAccountTrie
   706  		source.accountValues = elems
   707  		return source
   708  	}
   709  	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
   710  	done := checkStall(t, term)
   711  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   712  		t.Fatalf("sync failed: %v", err)
   713  	}
   714  	close(done)
   715  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   716  }
   717  
   718  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   719  func TestSyncWithStorage(t *testing.T) {
   720  	t.Parallel()
   721  
   722  	var (
   723  		once   sync.Once
   724  		cancel = make(chan struct{})
   725  		term   = func() {
   726  			once.Do(func() {
   727  				close(cancel)
   728  			})
   729  		}
   730  	)
   731  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
   732  
   733  	mkSource := func(name string) *testPeer {
   734  		source := newTestPeer(name, t, term)
   735  		source.accountTrie = sourceAccountTrie
   736  		source.accountValues = elems
   737  		source.storageTries = storageTries
   738  		source.storageValues = storageElems
   739  		return source
   740  	}
   741  	syncer := setupSyncer(mkSource("sourceA"))
   742  	done := checkStall(t, term)
   743  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   744  		t.Fatalf("sync failed: %v", err)
   745  	}
   746  	close(done)
   747  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   748  }
   749  
   750  // TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
   751  func TestMultiSyncManyUseless(t *testing.T) {
   752  	t.Parallel()
   753  
   754  	var (
   755  		once   sync.Once
   756  		cancel = make(chan struct{})
   757  		term   = func() {
   758  			once.Do(func() {
   759  				close(cancel)
   760  			})
   761  		}
   762  	)
   763  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   764  
   765  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   766  		source := newTestPeer(name, t, term)
   767  		source.accountTrie = sourceAccountTrie
   768  		source.accountValues = elems
   769  		source.storageTries = storageTries
   770  		source.storageValues = storageElems
   771  
   772  		if !noAccount {
   773  			source.accountRequestHandler = emptyRequestAccountRangeFn
   774  		}
   775  		if !noStorage {
   776  			source.storageRequestHandler = emptyStorageRequestHandler
   777  		}
   778  		if !noTrieNode {
   779  			source.trieRequestHandler = emptyTrieRequestHandler
   780  		}
   781  		return source
   782  	}
   783  
   784  	syncer := setupSyncer(
   785  		mkSource("full", true, true, true),
   786  		mkSource("noAccounts", false, true, true),
   787  		mkSource("noStorage", true, false, true),
   788  		mkSource("noTrie", true, true, false),
   789  	)
   790  	done := checkStall(t, term)
   791  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   792  		t.Fatalf("sync failed: %v", err)
   793  	}
   794  	close(done)
   795  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   796  }
   797  
   798  // TestMultiSyncManyUselessWithLowTimeout is like TestMultiSyncManyUseless, but runs with a very low request timeout
   799  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   800  	var (
   801  		once   sync.Once
   802  		cancel = make(chan struct{})
   803  		term   = func() {
   804  			once.Do(func() {
   805  				close(cancel)
   806  			})
   807  		}
   808  	)
   809  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   810  
   811  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   812  		source := newTestPeer(name, t, term)
   813  		source.accountTrie = sourceAccountTrie
   814  		source.accountValues = elems
   815  		source.storageTries = storageTries
   816  		source.storageValues = storageElems
   817  
   818  		if !noAccount {
   819  			source.accountRequestHandler = emptyRequestAccountRangeFn
   820  		}
   821  		if !noStorage {
   822  			source.storageRequestHandler = emptyStorageRequestHandler
   823  		}
   824  		if !noTrieNode {
   825  			source.trieRequestHandler = emptyTrieRequestHandler
   826  		}
   827  		return source
   828  	}
   829  
   830  	syncer := setupSyncer(
   831  		mkSource("full", true, true, true),
   832  		mkSource("noAccounts", false, true, true),
   833  		mkSource("noStorage", true, false, true),
   834  		mkSource("noTrie", true, true, false),
   835  	)
   836  	// We set the timeout very low, to increase the chance of the timeout
   837  	// being triggered. This previously caused a panic when a response
   838  	// arrived at the same time as a timeout was triggered.
   839  	syncer.rates.OverrideTTLLimit = time.Millisecond
   840  
   841  	done := checkStall(t, term)
   842  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   843  		t.Fatalf("sync failed: %v", err)
   844  	}
   845  	close(done)
   846  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   847  }
   848  
   849  // TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
   850  func TestMultiSyncManyUnresponsive(t *testing.T) {
   851  	var (
   852  		once   sync.Once
   853  		cancel = make(chan struct{})
   854  		term   = func() {
   855  			once.Do(func() {
   856  				close(cancel)
   857  			})
   858  		}
   859  	)
   860  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   861  
   862  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   863  		source := newTestPeer(name, t, term)
   864  		source.accountTrie = sourceAccountTrie
   865  		source.accountValues = elems
   866  		source.storageTries = storageTries
   867  		source.storageValues = storageElems
   868  
   869  		if !noAccount {
   870  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   871  		}
   872  		if !noStorage {
   873  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   874  		}
   875  		if !noTrieNode {
   876  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   877  		}
   878  		return source
   879  	}
   880  
   881  	syncer := setupSyncer(
   882  		mkSource("full", true, true, true),
   883  		mkSource("noAccounts", false, true, true),
   884  		mkSource("noStorage", true, false, true),
   885  		mkSource("noTrie", true, true, false),
   886  	)
   887  	// We set the timeout very low, to make the test run a bit faster
   888  	syncer.rates.OverrideTTLLimit = time.Millisecond
   889  
   890  	done := checkStall(t, term)
   891  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   892  		t.Fatalf("sync failed: %v", err)
   893  	}
   894  	close(done)
   895  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   896  }
   897  
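        // checkStall arms a watchdog for the test: if the sync has not finished within
        // a minute, it logs a stall and invokes term to cancel the sync. Callers close
        // the returned channel once Sync returns to disarm the watchdog.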
   898  func checkStall(t *testing.T, term func()) chan struct{} {
   899  	testDone := make(chan struct{})
   900  	go func() {
   901  		select {
   902  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   903  			t.Log("Sync stalled")
   904  			term()
   905  		case <-testDone:
   906  			return
   907  		}
   908  	}()
   909  	return testDone
   910  }
   911  
   912  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   913  // account trie has a few boundary elements.
   914  func TestSyncBoundaryAccountTrie(t *testing.T) {
   915  	t.Parallel()
   916  
   917  	var (
   918  		once   sync.Once
   919  		cancel = make(chan struct{})
   920  		term   = func() {
   921  			once.Do(func() {
   922  				close(cancel)
   923  			})
   924  		}
   925  	)
   926  	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
   927  
   928  	mkSource := func(name string) *testPeer {
   929  		source := newTestPeer(name, t, term)
   930  		source.accountTrie = sourceAccountTrie
   931  		source.accountValues = elems
   932  		return source
   933  	}
   934  	syncer := setupSyncer(
   935  		mkSource("peer-a"),
   936  		mkSource("peer-b"),
   937  	)
   938  	done := checkStall(t, term)
   939  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   940  		t.Fatalf("sync failed: %v", err)
   941  	}
   942  	close(done)
   943  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   944  }
   945  
   946  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   947  // consistently returning very small results
   948  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   949  	t.Parallel()
   950  
   951  	var (
   952  		once   sync.Once
   953  		cancel = make(chan struct{})
   954  		term   = func() {
   955  			once.Do(func() {
   956  				close(cancel)
   957  			})
   958  		}
   959  	)
   960  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   961  
   962  	mkSource := func(name string, slow bool) *testPeer {
   963  		source := newTestPeer(name, t, term)
   964  		source.accountTrie = sourceAccountTrie
   965  		source.accountValues = elems
   966  
   967  		if slow {
   968  			source.accountRequestHandler = starvingAccountRequestHandler
   969  		}
   970  		return source
   971  	}
   972  
   973  	syncer := setupSyncer(
   974  		mkSource("nice-a", false),
   975  		mkSource("nice-b", false),
   976  		mkSource("nice-c", false),
   977  		mkSource("capped", true),
   978  	)
   979  	done := checkStall(t, term)
   980  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   981  		t.Fatalf("sync failed: %v", err)
   982  	}
   983  	close(done)
   984  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   985  }
   986  
   987  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
   988  // code requests properly.
   989  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
   990  	t.Parallel()
   991  
   992  	var (
   993  		once   sync.Once
   994  		cancel = make(chan struct{})
   995  		term   = func() {
   996  			once.Do(func() {
   997  				close(cancel)
   998  			})
   999  		}
  1000  	)
  1001  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1002  
  1003  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1004  		source := newTestPeer(name, t, term)
  1005  		source.accountTrie = sourceAccountTrie
  1006  		source.accountValues = elems
  1007  		source.codeRequestHandler = codeFn
  1008  		return source
  1009  	}
  1010  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1011  	// chance that the full set of codes requested is sent only to the
  1012  	// non-corrupt peer, which delivers everything in one go, and makes the
  1013  	// test moot.
  1014  	syncer := setupSyncer(
  1015  		mkSource("capped", cappedCodeRequestHandler),
  1016  		mkSource("corrupt", corruptCodeRequestHandler),
  1017  	)
  1018  	done := checkStall(t, term)
  1019  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1020  		t.Fatalf("sync failed: %v", err)
  1021  	}
  1022  	close(done)
  1023  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1024  }
  1025  
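        // TestSyncNoStorageAndOneAccountCorruptPeer is the account-range counterpart of
        // the test above: one peer answers normally, the other strips a proof node from
        // every account response and is expected to be dropped.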
  1026  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1027  	t.Parallel()
  1028  
  1029  	var (
  1030  		once   sync.Once
  1031  		cancel = make(chan struct{})
  1032  		term   = func() {
  1033  			once.Do(func() {
  1034  				close(cancel)
  1035  			})
  1036  		}
  1037  	)
  1038  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1039  
  1040  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1041  		source := newTestPeer(name, t, term)
  1042  		source.accountTrie = sourceAccountTrie
  1043  		source.accountValues = elems
  1044  		source.accountRequestHandler = accFn
  1045  		return source
  1046  	}
  1047  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1048  	// chance that the full set of accounts requested is sent only to the
  1049  	// non-corrupt peer, which delivers everything in one go, and makes the
  1050  	// test moot.
  1051  	syncer := setupSyncer(
  1052  		mkSource("capped", defaultAccountRequestHandler),
  1053  		mkSource("corrupt", corruptAccountRequestHandler),
  1054  	)
  1055  	done := checkStall(t, term)
  1056  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1057  		t.Fatalf("sync failed: %v", err)
  1058  	}
  1059  	close(done)
  1060  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1061  }
  1062  
  1063  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
  1064  // one by one
  1065  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1066  	t.Parallel()
  1067  
  1068  	var (
  1069  		once   sync.Once
  1070  		cancel = make(chan struct{})
  1071  		term   = func() {
  1072  			once.Do(func() {
  1073  				close(cancel)
  1074  			})
  1075  		}
  1076  	)
  1077  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1078  
  1079  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1080  		source := newTestPeer(name, t, term)
  1081  		source.accountTrie = sourceAccountTrie
  1082  		source.accountValues = elems
  1083  		source.codeRequestHandler = codeFn
  1084  		return source
  1085  	}
  1086  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1087  	// so it shouldn't be more than that
  1088  	var counter int
  1089  	syncer := setupSyncer(
  1090  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1091  			counter++
  1092  			return cappedCodeRequestHandler(t, id, hashes, max)
  1093  		}),
  1094  	)
  1095  	done := checkStall(t, term)
  1096  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1097  		t.Fatalf("sync failed: %v", err)
  1098  	}
  1099  	close(done)
  1100  
  1101  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1102  	// deduplication is per request batch. If it were a perfect global dedup,
  1103  	// we would expect only 8 requests. If there were no dedup, there would be
  1104  	// 3k requests.
  1105  	// We expect somewhere below 100 requests for these 8 unique hashes. But
  1106  	// the number can be flaky, so don't limit it so strictly.
  1107  	if threshold := 100; counter > threshold {
  1108  		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
  1109  	}
  1110  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1111  }
  1112  
  1113  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1114  // storage trie has a few boundary elements.
  1115  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1116  	t.Parallel()
  1117  
  1118  	var (
  1119  		once   sync.Once
  1120  		cancel = make(chan struct{})
  1121  		term   = func() {
  1122  			once.Do(func() {
  1123  				close(cancel)
  1124  			})
  1125  		}
  1126  	)
  1127  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
  1128  
  1129  	mkSource := func(name string) *testPeer {
  1130  		source := newTestPeer(name, t, term)
  1131  		source.accountTrie = sourceAccountTrie
  1132  		source.accountValues = elems
  1133  		source.storageTries = storageTries
  1134  		source.storageValues = storageElems
  1135  		return source
  1136  	}
  1137  	syncer := setupSyncer(
  1138  		mkSource("peer-a"),
  1139  		mkSource("peer-b"),
  1140  	)
  1141  	done := checkStall(t, term)
  1142  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1143  		t.Fatalf("sync failed: %v", err)
  1144  	}
  1145  	close(done)
  1146  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1147  }
  1148  
  1149  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1150  // consistently returning very small results
  1151  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1152  	t.Parallel()
  1153  
  1154  	var (
  1155  		once   sync.Once
  1156  		cancel = make(chan struct{})
  1157  		term   = func() {
  1158  			once.Do(func() {
  1159  				close(cancel)
  1160  			})
  1161  		}
  1162  	)
  1163  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
  1164  
  1165  	mkSource := func(name string, slow bool) *testPeer {
  1166  		source := newTestPeer(name, t, term)
  1167  		source.accountTrie = sourceAccountTrie
  1168  		source.accountValues = elems
  1169  		source.storageTries = storageTries
  1170  		source.storageValues = storageElems
  1171  
  1172  		if slow {
  1173  			source.storageRequestHandler = starvingStorageRequestHandler
  1174  		}
  1175  		return source
  1176  	}
  1177  
  1178  	syncer := setupSyncer(
  1179  		mkSource("nice-a", false),
  1180  		mkSource("slow", true),
  1181  	)
  1182  	done := checkStall(t, term)
  1183  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1184  		t.Fatalf("sync failed: %v", err)
  1185  	}
  1186  	close(done)
  1187  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1188  }
  1189  
  1190  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1191  // sometimes sending bad proofs
  1192  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1193  	t.Parallel()
  1194  
  1195  	var (
  1196  		once   sync.Once
  1197  		cancel = make(chan struct{})
  1198  		term   = func() {
  1199  			once.Do(func() {
  1200  				close(cancel)
  1201  			})
  1202  		}
  1203  	)
  1204  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1205  
  1206  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1207  		source := newTestPeer(name, t, term)
  1208  		source.accountTrie = sourceAccountTrie
  1209  		source.accountValues = elems
  1210  		source.storageTries = storageTries
  1211  		source.storageValues = storageElems
  1212  		source.storageRequestHandler = handler
  1213  		return source
  1214  	}
  1215  
  1216  	syncer := setupSyncer(
  1217  		mkSource("nice-a", defaultStorageRequestHandler),
  1218  		mkSource("nice-b", defaultStorageRequestHandler),
  1219  		mkSource("nice-c", defaultStorageRequestHandler),
  1220  		mkSource("corrupt", corruptStorageRequestHandler),
  1221  	)
  1222  	done := checkStall(t, term)
  1223  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1224  		t.Fatalf("sync failed: %v", err)
  1225  	}
  1226  	close(done)
  1227  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1228  }
  1229  
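        // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where
        // one peer always omits the Merkle proofs from its storage responses.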
  1230  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1231  	t.Parallel()
  1232  
  1233  	var (
  1234  		once   sync.Once
  1235  		cancel = make(chan struct{})
  1236  		term   = func() {
  1237  			once.Do(func() {
  1238  				close(cancel)
  1239  			})
  1240  		}
  1241  	)
  1242  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1243  
  1244  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1245  		source := newTestPeer(name, t, term)
  1246  		source.accountTrie = sourceAccountTrie
  1247  		source.accountValues = elems
  1248  		source.storageTries = storageTries
  1249  		source.storageValues = storageElems
  1250  		source.storageRequestHandler = handler
  1251  		return source
  1252  	}
  1253  	syncer := setupSyncer(
  1254  		mkSource("nice-a", defaultStorageRequestHandler),
  1255  		mkSource("nice-b", defaultStorageRequestHandler),
  1256  		mkSource("nice-c", defaultStorageRequestHandler),
  1257  		mkSource("corrupt", noProofStorageRequestHandler),
  1258  	)
  1259  	done := checkStall(t, term)
  1260  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1261  		t.Fatalf("sync failed: %v", err)
  1262  	}
  1263  	close(done)
  1264  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1265  }
  1266  
  1267  // TestSyncWithStorageMisbehavingProve tests a basic sync using accounts + storage + code,
  1268  // against a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1269  // an error, where the recipient erroneously clipped the boundary nodes, but
  1270  // did not mark the account for healing.
  1271  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1272  	t.Parallel()
  1273  	var (
  1274  		once   sync.Once
  1275  		cancel = make(chan struct{})
  1276  		term   = func() {
  1277  			once.Do(func() {
  1278  				close(cancel)
  1279  			})
  1280  		}
  1281  	)
  1282  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
  1283  
  1284  	mkSource := func(name string) *testPeer {
  1285  		source := newTestPeer(name, t, term)
  1286  		source.accountTrie = sourceAccountTrie
  1287  		source.accountValues = elems
  1288  		source.storageTries = storageTries
  1289  		source.storageValues = storageElems
  1290  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1291  		return source
  1292  	}
  1293  	syncer := setupSyncer(mkSource("sourceA"))
  1294  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1295  		t.Fatalf("sync failed: %v", err)
  1296  	}
  1297  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1298  }
  1299  
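        // kv is a raw key/value pair. The test peers keep these flat, sorted slices next
        // to the tries so request handlers can serve range queries by simple iteration.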
  1300  type kv struct {
  1301  	k, v []byte
  1302  }
  1303  
  1304  // Some helpers for sorting
  1305  type entrySlice []*kv
  1306  
  1307  func (p entrySlice) Len() int           { return len(p) }
  1308  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
  1309  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
  1310  
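        // key32 expands a counter into a 32-byte, little-endian key, giving each test
        // account and storage slot a deterministic hash-sized key.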
  1311  func key32(i uint64) []byte {
  1312  	key := make([]byte, 32)
  1313  	binary.LittleEndian.PutUint64(key, i)
  1314  	return key
  1315  }
  1316  
  1317  var (
  1318  	codehashes = []common.Hash{
  1319  		crypto.Keccak256Hash([]byte{0}),
  1320  		crypto.Keccak256Hash([]byte{1}),
  1321  		crypto.Keccak256Hash([]byte{2}),
  1322  		crypto.Keccak256Hash([]byte{3}),
  1323  		crypto.Keccak256Hash([]byte{4}),
  1324  		crypto.Keccak256Hash([]byte{5}),
  1325  		crypto.Keccak256Hash([]byte{6}),
  1326  		crypto.Keccak256Hash([]byte{7}),
  1327  	}
  1328  )
  1329  
  1330  // getCodeHash returns a pseudo-random code hash
  1331  func getCodeHash(i uint64) []byte {
  1332  	h := codehashes[int(i)%len(codehashes)]
  1333  	return common.CopyBytes(h[:])
  1334  }
  1335  
  1336  // getCodeByHash is a convenience function to look up the code from the code hash
  1337  func getCodeByHash(hash common.Hash) []byte {
  1338  	if hash == emptyCode {
  1339  		return nil
  1340  	}
  1341  	for i, h := range codehashes {
  1342  		if h == hash {
  1343  			return []byte{byte(i)}
  1344  		}
  1345  	}
  1346  	return nil
  1347  }
  1348  
  1349  // makeAccountTrieNoStorage spits out a trie, along with the leaves
  1350  func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
  1351  	var (
  1352  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1353  		accTrie = trie.NewEmpty(db)
  1354  		entries entrySlice
  1355  	)
  1356  	for i := uint64(1); i <= uint64(n); i++ {
  1357  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1358  			Nonce:    i,
  1359  			Balance:  big.NewInt(int64(i)),
  1360  			Root:     emptyRoot,
  1361  			CodeHash: getCodeHash(i),
  1362  		})
  1363  		key := key32(i)
  1364  		elem := &kv{key, value}
  1365  		accTrie.Update(elem.k, elem.v)
  1366  		entries = append(entries, elem)
  1367  	}
  1368  	sort.Sort(entries)
  1369  
  1370  	// Commit the state changes into db and re-create the trie
  1371  	// for accessing later.
  1372  	root, nodes, _ := accTrie.Commit(false)
  1373  	db.Update(trie.NewWithNodeSet(nodes))
  1374  
  1375  	accTrie, _ = trie.New(common.Hash{}, root, db)
  1376  	return accTrie, entries
  1377  }
  1378  
  1379  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1380  // accounts normally, this function will fill a few accounts which have
  1381  // boundary hashes.
  1382  func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
  1383  	var (
  1384  		entries    entrySlice
  1385  		boundaries []common.Hash
  1386  
  1387  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1388  		accTrie = trie.NewEmpty(db)
  1389  	)
  1390  	// Initialize boundaries
  1391  	var next common.Hash
  1392  	step := new(big.Int).Sub(
  1393  		new(big.Int).Div(
  1394  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1395  			big.NewInt(int64(accountConcurrency)),
  1396  		), common.Big1,
  1397  	)
  1398  	for i := 0; i < accountConcurrency; i++ {
  1399  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1400  		if i == accountConcurrency-1 {
  1401  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1402  		}
  1403  		boundaries = append(boundaries, last)
  1404  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1405  	}
  1406  	// Fill boundary accounts
  1407  	for i := 0; i < len(boundaries); i++ {
  1408  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1409  			Nonce:    uint64(0),
  1410  			Balance:  big.NewInt(int64(i)),
  1411  			Root:     emptyRoot,
  1412  			CodeHash: getCodeHash(uint64(i)),
  1413  		})
  1414  		elem := &kv{boundaries[i].Bytes(), value}
  1415  		accTrie.Update(elem.k, elem.v)
  1416  		entries = append(entries, elem)
  1417  	}
  1418  	// Fill other accounts if required
  1419  	for i := uint64(1); i <= uint64(n); i++ {
  1420  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1421  			Nonce:    i,
  1422  			Balance:  big.NewInt(int64(i)),
  1423  			Root:     emptyRoot,
  1424  			CodeHash: getCodeHash(i),
  1425  		})
  1426  		elem := &kv{key32(i), value}
  1427  		accTrie.Update(elem.k, elem.v)
  1428  		entries = append(entries, elem)
  1429  	}
  1430  	sort.Sort(entries)
  1431  
  1432  	// Commit the state changes into db and re-create the trie
  1433  	// for accessing later.
  1434  	root, nodes, _ := accTrie.Commit(false)
  1435  	db.Update(trie.NewWithNodeSet(nodes))
  1436  
  1437  	accTrie, _ = trie.New(common.Hash{}, root, db)
  1438  	return accTrie, entries
  1439  }
  1440  
  1441  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1442  // has a unique storage set.
  1443  func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1444  	var (
  1445  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1446  		accTrie        = trie.NewEmpty(db)
  1447  		entries        entrySlice
  1448  		storageRoots   = make(map[common.Hash]common.Hash)
  1449  		storageTries   = make(map[common.Hash]*trie.Trie)
  1450  		storageEntries = make(map[common.Hash]entrySlice)
  1451  		nodes          = trie.NewMergedNodeSet()
  1452  	)
  1453  	// Create n accounts in the trie
  1454  	for i := uint64(1); i <= uint64(accounts); i++ {
  1455  		key := key32(i)
  1456  		codehash := emptyCode[:]
  1457  		if code {
  1458  			codehash = getCodeHash(i)
  1459  		}
  1460  		// Create a storage trie
  1461  		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
  1462  		nodes.Merge(stNodes)
  1463  
  1464  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1465  			Nonce:    i,
  1466  			Balance:  big.NewInt(int64(i)),
  1467  			Root:     stRoot,
  1468  			CodeHash: codehash,
  1469  		})
  1470  		elem := &kv{key, value}
  1471  		accTrie.Update(elem.k, elem.v)
  1472  		entries = append(entries, elem)
  1473  
  1474  		storageRoots[common.BytesToHash(key)] = stRoot
  1475  		storageEntries[common.BytesToHash(key)] = stEntries
  1476  	}
  1477  	sort.Sort(entries)
  1478  
  1479  	// Commit account trie
  1480  	root, set, _ := accTrie.Commit(true)
  1481  	nodes.Merge(set)
  1482  
  1483  	// Commit gathered dirty nodes into database
  1484  	db.Update(nodes)
  1485  
  1486  	// Re-create tries with new root
  1487  	accTrie, _ = trie.New(common.Hash{}, root, db)
  1488  	for i := uint64(1); i <= uint64(accounts); i++ {
  1489  		key := key32(i)
  1490  		trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
  1491  		storageTries[common.BytesToHash(key)] = trie
  1492  	}
  1493  	return accTrie, entries, storageTries, storageEntries
  1494  }
  1495  
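        // Each account above seeds its storage trie with its own index, so no two
        // accounts end up with the same storage root. A hedged sketch of that
        // property, calling makeStorageTrieWithSeed directly; the function name is
        // hypothetical and not part of the original suite:
        func testUniqueStorageSeedSketch(t *testing.T) {
        	db := trie.NewDatabase(rawdb.NewMemoryDatabase())

        	// Same slot count, different seeds: the slot values differ, so the roots must too.
        	rootA, _, _ := makeStorageTrieWithSeed(common.HexToHash("0x01"), 10, 1, db)
        	rootB, _, _ := makeStorageTrieWithSeed(common.HexToHash("0x02"), 10, 2, db)
        	if rootA == rootB {
        		t.Fatalf("expected distinct storage roots for distinct seeds, both are %x", rootA)
        	}
        }
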
  1496  // makeAccountTrieWithStorage spits out an account trie and its storage tries, along with the sorted leaf entries for both.
  1497  func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1498  	var (
  1499  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1500  		accTrie        = trie.NewEmpty(db)
  1501  		entries        entrySlice
  1502  		storageRoots   = make(map[common.Hash]common.Hash)
  1503  		storageTries   = make(map[common.Hash]*trie.Trie)
  1504  		storageEntries = make(map[common.Hash]entrySlice)
  1505  		nodes          = trie.NewMergedNodeSet()
  1506  	)
  1507  	// Create n accounts in the trie
  1508  	for i := uint64(1); i <= uint64(accounts); i++ {
  1509  		key := key32(i)
  1510  		codehash := emptyCode[:]
  1511  		if code {
  1512  			codehash = getCodeHash(i)
  1513  		}
  1514  		// Make a storage trie
  1515  		var (
  1516  			stRoot    common.Hash
  1517  			stNodes   *trie.NodeSet
  1518  			stEntries entrySlice
  1519  		)
  1520  		if boundary {
  1521  			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
  1522  		} else {
  1523  			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
  1524  		}
  1525  		nodes.Merge(stNodes)
  1526  
  1527  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1528  			Nonce:    i,
  1529  			Balance:  big.NewInt(int64(i)),
  1530  			Root:     stRoot,
  1531  			CodeHash: codehash,
  1532  		})
  1533  		elem := &kv{key, value}
  1534  		accTrie.Update(elem.k, elem.v)
  1535  		entries = append(entries, elem)
  1536  
  1537  		// The same storage content is reused for all accounts, so every account shares one storage root
  1538  		storageRoots[common.BytesToHash(key)] = stRoot
  1539  		storageEntries[common.BytesToHash(key)] = stEntries
  1540  	}
  1541  	sort.Sort(entries)
  1542  
  1543  	// Commit account trie
  1544  	root, set, _ := accTrie.Commit(true)
  1545  	nodes.Merge(set)
  1546  
  1547  	// Commit gathered dirty nodes into database
  1548  	db.Update(nodes)
  1549  
  1550  	// Re-create tries with new root
  1551  	accTrie, err := trie.New(common.Hash{}, root, db)
  1552  	if err != nil {
  1553  		panic(err)
  1554  	}
  1555  	for i := uint64(1); i <= uint64(accounts); i++ {
  1556  		key := key32(i)
  1557  		trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
  1558  		if err != nil {
  1559  			panic(err)
  1560  		}
  1561  		storageTries[common.BytesToHash(key)] = trie
  1562  	}
  1563  	return accTrie, entries, storageTries, storageEntries
  1564  }
  1565  
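        // The leaf values generated above are RLP encoded StateAccount structs keyed
        // by key32(i). A hedged sketch decoding the first generated leaf again, much
        // like verifyTrie further below does for every account; the function name is
        // hypothetical and not part of the original suite:
        func testDecodeAccountLeafSketch(t *testing.T) {
        	_, entries, _, _ := makeAccountTrieWithStorage(3, 5, false, false)

        	var acc types.StateAccount
        	if err := rlp.DecodeBytes(entries[0].v, &acc); err != nil {
        		t.Fatalf("failed to decode account leaf: %v", err)
        	}
        	// Every generated account owns 5 storage slots, so its root cannot be empty.
        	if acc.Root == emptyRoot {
        		t.Fatal("expected a non-empty storage root for an account with slots")
        	}
        }
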
  1566  // makeStorageTrieWithSeed fills a storage trie with n items and commits it,
  1567  // returning the root, the committed node set and the sorted entries. The seed
  1568  // can be used to ensure that tries are unique.
  1569  func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
  1570  	trie, _ := trie.New(owner, common.Hash{}, db)
  1571  	var entries entrySlice
  1572  	for i := uint64(1); i <= n; i++ {
  1573  		// store 'i + seed' at slot 'i'
  1574  		slotValue := key32(i + seed)
  1575  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1576  
  1577  		slotKey := key32(i)
  1578  		key := crypto.Keccak256Hash(slotKey[:])
  1579  
  1580  		elem := &kv{key[:], rlpSlotValue}
  1581  		trie.Update(elem.k, elem.v)
  1582  		entries = append(entries, elem)
  1583  	}
  1584  	sort.Sort(entries)
  1585  	root, nodes, _ := trie.Commit(false)
  1586  	return root, nodes, entries
  1587  }
  1588  
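        // Slots are keyed by the keccak256 hash of key32(i) and hold the RLP
        // encoding of the left-trimmed slot value. A hedged sketch checking that
        // layout for a single-slot trie; the function name is hypothetical and not
        // part of the original suite:
        func testStorageSlotLayoutSketch(t *testing.T) {
        	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
        	_, _, entries := makeStorageTrieWithSeed(common.HexToHash("0x01"), 1, 0, db)

        	// With seed 0, slot 1 stores key32(1) under keccak256(key32(1)).
        	slotKey := key32(1)
        	wantKey := crypto.Keccak256Hash(slotKey[:])
        	wantVal, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotKey[:]))

        	if !bytes.Equal(entries[0].k, wantKey[:]) || !bytes.Equal(entries[0].v, wantVal) {
        		t.Fatalf("unexpected slot layout: have %x=%x", entries[0].k, entries[0].v)
        	}
        }
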
  1589  // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
  1590  // storage slots normally, this function will fill a few slots whose hashes sit
  1591  // exactly on the range boundaries derived from accountConcurrency.
  1592  func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
  1593  	var (
  1594  		entries    entrySlice
  1595  		boundaries []common.Hash
  1596  		trie, _    = trie.New(owner, common.Hash{}, db)
  1597  	)
  1598  	// Initialize boundaries
  1599  	var next common.Hash
  1600  	step := new(big.Int).Sub(
  1601  		new(big.Int).Div(
  1602  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1603  			big.NewInt(int64(accountConcurrency)),
  1604  		), common.Big1,
  1605  	)
  1606  	for i := 0; i < accountConcurrency; i++ {
  1607  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1608  		if i == accountConcurrency-1 {
  1609  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1610  		}
  1611  		boundaries = append(boundaries, last)
  1612  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1613  	}
  1614  	// Fill boundary slots
  1615  	for i := 0; i < len(boundaries); i++ {
  1616  		key := boundaries[i]
  1617  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1618  
  1619  		elem := &kv{key[:], val}
  1620  		trie.Update(elem.k, elem.v)
  1621  		entries = append(entries, elem)
  1622  	}
  1623  	// Fill other slots if required
  1624  	for i := uint64(1); i <= uint64(n); i++ {
  1625  		slotKey := key32(i)
  1626  		key := crypto.Keccak256Hash(slotKey[:])
  1627  
  1628  		slotValue := key32(i)
  1629  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1630  
  1631  		elem := &kv{key[:], rlpSlotValue}
  1632  		trie.Update(elem.k, elem.v)
  1633  		entries = append(entries, elem)
  1634  	}
  1635  	sort.Sort(entries)
  1636  	root, nodes, _ := trie.Commit(false)
  1637  	return root, nodes, entries
  1638  }
  1639  
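        // A hedged sketch checking that a boundary storage trie contains the
        // accountConcurrency boundary slots on top of the n regular ones, and that
        // the largest entry is the maximal hash; the function name is hypothetical
        // and not part of the original suite:
        func testBoundaryStorageEntriesSketch(t *testing.T) {
        	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
        	_, _, entries := makeBoundaryStorageTrie(common.HexToHash("0x01"), 10, db)

        	if have, want := len(entries), accountConcurrency+10; have != want {
        		t.Fatalf("entry count mismatch: have %d, want %d", have, want)
        	}
        	// entries is sorted, so the 0xff..ff boundary must come last.
        	if last := entries[len(entries)-1]; !bytes.Equal(last.k, bytes.Repeat([]byte{0xff}, 32)) {
        		t.Fatalf("last entry is not the maximal boundary hash: %x", last.k)
        	}
        }
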
  1640  func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1641  	t.Helper()
  1642  	triedb := trie.NewDatabase(db)
  1643  	accTrie, err := trie.New(common.Hash{}, root, triedb)
  1644  	if err != nil {
  1645  		t.Fatal(err)
  1646  	}
  1647  	accounts, slots := 0, 0
  1648  	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
  1649  	for accIt.Next() {
  1650  		var acc struct {
  1651  			Nonce    uint64
  1652  			Balance  *big.Int
  1653  			Root     common.Hash
  1654  			CodeHash []byte
  1655  		}
  1656  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1657  			log.Crit("Invalid account encountered during snapshot creation", "err", err)
  1658  		}
  1659  		accounts++
  1660  		if acc.Root != emptyRoot {
  1661  			storeTrie, err := trie.NewStateTrie(common.BytesToHash(accIt.Key), acc.Root, triedb)
  1662  			if err != nil {
  1663  				t.Fatal(err)
  1664  			}
  1665  			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
  1666  			for storeIt.Next() {
  1667  				slots++
  1668  			}
  1669  			if err := storeIt.Err; err != nil {
  1670  				t.Fatal(err)
  1671  			}
  1672  		}
  1673  	}
  1674  	if err := accIt.Err; err != nil {
  1675  		t.Fatal(err)
  1676  	}
  1677  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1678  }
  1679  
  1680  // TestSyncAccountPerformance tests how efficient the snap sync algorithm is at
  1681  // minimizing state healing.
  1682  func TestSyncAccountPerformance(t *testing.T) {
  1683  	// Set the account concurrency to 1. This _should_ result in the range root
  1684  	// becoming correct, and there should be no healing needed.
  1685  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1686  	accountConcurrency = 1
  1687  
  1688  	var (
  1689  		once   sync.Once
  1690  		cancel = make(chan struct{})
  1691  		term   = func() {
  1692  			once.Do(func() {
  1693  				close(cancel)
  1694  			})
  1695  		}
  1696  	)
  1697  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
  1698  
  1699  	mkSource := func(name string) *testPeer {
  1700  		source := newTestPeer(name, t, term)
  1701  		source.accountTrie = sourceAccountTrie
  1702  		source.accountValues = elems
  1703  		return source
  1704  	}
  1705  	src := mkSource("source")
  1706  	syncer := setupSyncer(src)
  1707  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1708  		t.Fatalf("sync failed: %v", err)
  1709  	}
  1710  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1711  	// The trie root will always be requested, since it is added when the snap
  1712  	// sync cycle starts. When popping the queue, we do not look it up again.
  1713  	// Doing so would bring this number down to zero in this artificial testcase,
  1714  	// but only add extra IO for no reason in practice.
  1715  	if have, want := src.nTrienodeRequests, 1; have != want {
  1716  		fmt.Print(src.Stats())
  1717  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1718  	}
  1719  }
  1720  
  1721  func TestSlotEstimation(t *testing.T) {
  1722  	for i, tc := range []struct {
  1723  		last  common.Hash
  1724  		count int
  1725  		want  uint64
  1726  	}{
  1727  		{
  1728  			// Half the space
  1729  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1730  			100,
  1731  			100,
  1732  		},
  1733  		{
  1734  			// 1 / 16th
  1735  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1736  			100,
  1737  			1500,
  1738  		},
  1739  		{
  1740  			// A bit more than 1 / 16th
  1741  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1742  			100,
  1743  			1499,
  1744  		},
  1745  		{
  1746  			// Almost everything
  1747  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1748  			100,
  1749  			6,
  1750  		},
  1751  		{
  1752  			// Almost nothing -- should lead to error
  1753  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1754  			1,
  1755  			0,
  1756  		},
  1757  		{
  1758  			// Nothing -- should lead to error
  1759  			common.Hash{},
  1760  			100,
  1761  			0,
  1762  		},
  1763  	} {
  1764  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1765  		if want := tc.want; have != want {
  1766  			t.Errorf("test %d: have %d want %d", i, have, want)
  1767  		}
  1768  	}
  1769  }
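
        // The expectations above follow from simple proportionality: if `count` slots
        // were found in the covered range [0, last], then roughly
        // count * (remaining space) / (covered space) slots should remain. A hedged
        // sketch recomputing the 1/16th case by hand; the function name is
        // hypothetical and not part of the original suite:
        func testSlotEstimationByHandSketch(t *testing.T) {
        	// The 1/16th boundary from the table above, i.e. 2^252 - 1 (0x0fff..ff).
        	last := common.BigToHash(new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 252), common.Big1))

        	space := new(big.Int).Exp(common.Big2, common.Big256, nil)
        	covered := new(big.Int).Add(last.Big(), common.Big1) // 2^252, i.e. 1/16th of the space
        	remaining := new(big.Int).Sub(space, covered)

        	// 100 slots in 1/16th of the space suggests 15 * 100 slots in the rest.
        	estimate := new(big.Int).Div(new(big.Int).Mul(big.NewInt(100), remaining), covered)
        	if estimate.Uint64() != 1500 {
        		t.Fatalf("hand computation off: have %d, want 1500", estimate.Uint64())
        	}
        }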