github.com/calmw/ethereum@v0.1.1/eth/protocols/snap/sync_test.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sort"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/calmw/ethereum/common"
    31  	"github.com/calmw/ethereum/core/rawdb"
    32  	"github.com/calmw/ethereum/core/types"
    33  	"github.com/calmw/ethereum/crypto"
    34  	"github.com/calmw/ethereum/ethdb"
    35  	"github.com/calmw/ethereum/light"
    36  	"github.com/calmw/ethereum/log"
    37  	"github.com/calmw/ethereum/rlp"
    38  	"github.com/calmw/ethereum/trie"
    39  	"github.com/calmw/ethereum/trie/trienode"
    40  	"golang.org/x/crypto/sha3"
    41  )
    42  
    43  func TestHashing(t *testing.T) {
    44  	t.Parallel()
    45  
    46  	var bytecodes = make([][]byte, 10)
    47  	for i := 0; i < len(bytecodes); i++ {
    48  		buf := make([]byte, 100)
    49  		rand.Read(buf)
    50  		bytecodes[i] = buf
    51  	}
    52  	var want, got string
    53  	var old = func() {
    54  		hasher := sha3.NewLegacyKeccak256()
    55  		for i := 0; i < len(bytecodes); i++ {
    56  			hasher.Reset()
    57  			hasher.Write(bytecodes[i])
    58  			hash := hasher.Sum(nil)
    59  			got = fmt.Sprintf("%v\n%v", got, hash)
    60  		}
    61  	}
    62  	var new = func() {
    63  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    64  		var hash = make([]byte, 32)
    65  		for i := 0; i < len(bytecodes); i++ {
    66  			hasher.Reset()
    67  			hasher.Write(bytecodes[i])
    68  			hasher.Read(hash)
    69  			want = fmt.Sprintf("%v\n%v", want, hash)
    70  		}
    71  	}
    72  	old()
    73  	new()
    74  	if want != got {
    75  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    76  	}
    77  }
    78  
    79  func BenchmarkHashing(b *testing.B) {
    80  	var bytecodes = make([][]byte, 10000)
    81  	for i := 0; i < len(bytecodes); i++ {
    82  		buf := make([]byte, 100)
    83  		rand.Read(buf)
    84  		bytecodes[i] = buf
    85  	}
    86  	var old = func() {
    87  		hasher := sha3.NewLegacyKeccak256()
    88  		for i := 0; i < len(bytecodes); i++ {
    89  			hasher.Reset()
    90  			hasher.Write(bytecodes[i])
    91  			hasher.Sum(nil)
    92  		}
    93  	}
    94  	var new = func() {
    95  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    96  		var hash = make([]byte, 32)
    97  		for i := 0; i < len(bytecodes); i++ {
    98  			hasher.Reset()
    99  			hasher.Write(bytecodes[i])
   100  			hasher.Read(hash)
   101  		}
   102  	}
   103  	b.Run("old", func(b *testing.B) {
   104  		b.ReportAllocs()
   105  		for i := 0; i < b.N; i++ {
   106  			old()
   107  		}
   108  	})
   109  	b.Run("new", func(b *testing.B) {
   110  		b.ReportAllocs()
   111  		for i := 0; i < b.N; i++ {
   112  			new()
   113  		}
   114  	})
   115  }
   116  
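        // Handler function types which let individual tests customize how a testPeer
        // answers account range, storage range, trie node and bytecode requests.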
   117  type (
   118  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   119  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   120  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   121  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   122  )
   123  
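        // testPeer is an in-memory peer which answers the syncer's snap requests out of
        // pre-built account and storage tries, via configurable handler functions.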
   124  type testPeer struct {
   125  	id            string
   126  	test          *testing.T
   127  	remote        *Syncer
   128  	logger        log.Logger
   129  	accountTrie   *trie.Trie
   130  	accountValues entrySlice
   131  	storageTries  map[common.Hash]*trie.Trie
   132  	storageValues map[common.Hash]entrySlice
   133  
   134  	accountRequestHandler accountHandlerFunc
   135  	storageRequestHandler storageHandlerFunc
   136  	trieRequestHandler    trieHandlerFunc
   137  	codeRequestHandler    codeHandlerFunc
   138  	term                  func()
   139  
   140  	// counters
   141  	nAccountRequests  int
   142  	nStorageRequests  int
   143  	nBytecodeRequests int
   144  	nTrienodeRequests int
   145  }
   146  
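        // newTestPeer creates a test peer with the default, well-behaved request handlers installed.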
   147  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   148  	peer := &testPeer{
   149  		id:                    id,
   150  		test:                  t,
   151  		logger:                log.New("id", id),
   152  		accountRequestHandler: defaultAccountRequestHandler,
   153  		trieRequestHandler:    defaultTrieRequestHandler,
   154  		storageRequestHandler: defaultStorageRequestHandler,
   155  		codeRequestHandler:    defaultCodeRequestHandler,
   156  		term:                  term,
   157  	}
   158  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   159  	//peer.logger.SetHandler(stderrHandler)
   160  	return peer
   161  }
   162  
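        // setStorageTries installs independent copies of the given storage tries on the peer.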
   163  func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
   164  	t.storageTries = make(map[common.Hash]*trie.Trie)
   165  	for root, trie := range tries {
   166  		t.storageTries[root] = trie.Copy()
   167  	}
   168  }
   169  
   170  func (t *testPeer) ID() string      { return t.id }
   171  func (t *testPeer) Log() log.Logger { return t.logger }
   172  
   173  func (t *testPeer) Stats() string {
   174  	return fmt.Sprintf(`Account requests: %d
   175  Storage requests: %d
   176  Bytecode requests: %d
   177  Trienode requests: %d
   178  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   179  }
   180  
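        // The Request* methods below emulate the network layer: each one bumps the matching
        // counter, dispatches the request to the configured handler on a new goroutine and
        // reports success immediately.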
   181  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   182  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   183  	t.nAccountRequests++
   184  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   185  	return nil
   186  }
   187  
   188  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   189  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   190  	t.nTrienodeRequests++
   191  	go t.trieRequestHandler(t, id, root, paths, bytes)
   192  	return nil
   193  }
   194  
   195  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   196  	t.nStorageRequests++
   197  	if len(accounts) == 1 && origin != nil {
   198  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   199  	} else {
   200  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   201  	}
   202  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   203  	return nil
   204  }
   205  
   206  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   207  	t.nBytecodeRequests++
   208  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   209  	go t.codeRequestHandler(t, id, hashes, bytes)
   210  	return nil
   211  }
   212  
   213  // defaultTrieRequestHandler is a well-behaved handler for trie healing requests
   214  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   215  	// Pass the response
   216  	var nodes [][]byte
   217  	for _, pathset := range paths {
   218  		switch len(pathset) {
   219  		case 1:
   220  			blob, _, err := t.accountTrie.GetNode(pathset[0])
   221  			if err != nil {
   222  				t.logger.Info("Error handling req", "error", err)
   223  				break
   224  			}
   225  			nodes = append(nodes, blob)
   226  		default:
   227  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   228  			for _, path := range pathset[1:] {
   229  				blob, _, err := account.GetNode(path)
   230  				if err != nil {
   231  					t.logger.Info("Error handling req", "error", err)
   232  					break
   233  				}
   234  				nodes = append(nodes, blob)
   235  			}
   236  		}
   237  	}
   238  	t.remote.OnTrieNodes(t, requestId, nodes)
   239  	return nil
   240  }
   241  
   242  // defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
   243  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   244  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   245  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   246  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   247  		t.term()
   248  		return err
   249  	}
   250  	return nil
   251  }
   252  
   253  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   254  	var size uint64
   255  	if limit == (common.Hash{}) {
   256  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   257  	}
   258  	for _, entry := range t.accountValues {
   259  		if size > cap {
   260  			break
   261  		}
   262  		if bytes.Compare(origin[:], entry.k) <= 0 {
   263  			keys = append(keys, common.BytesToHash(entry.k))
   264  			vals = append(vals, entry.v)
   265  			size += uint64(32 + len(entry.v))
   266  		}
   267  		// If we've exceeded the request threshold, abort
   268  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   269  			break
   270  		}
   271  	}
   272  	// Unless we send the entire trie, we need to supply proofs
   273  	// Actually, we need to supply proofs either way! This seems to be an implementation
   274  	// quirk in go-ethereum
   275  	proof := light.NewNodeSet()
   276  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   277  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   278  	}
   279  	if len(keys) > 0 {
   280  		lastK := (keys[len(keys)-1])[:]
   281  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   282  			t.logger.Error("Could not prove last item", "error", err)
   283  		}
   284  	}
   285  	for _, blob := range proof.NodeList() {
   286  		proofs = append(proofs, blob)
   287  	}
   288  	return keys, vals, proofs
   289  }
   290  
   291  // defaultStorageRequestHandler is a well-behaved storage request handler
   292  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   293  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   294  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   295  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   296  		t.term()
   297  	}
   298  	return nil
   299  }
   300  
   301  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   302  	var bytecodes [][]byte
   303  	for _, h := range hashes {
   304  		bytecodes = append(bytecodes, getCodeByHash(h))
   305  	}
   306  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   307  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   308  		t.term()
   309  	}
   310  	return nil
   311  }
   312  
   313  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   314  	var size uint64
   315  	for _, account := range accounts {
   316  		// The first account might start from a different origin and end sooner
   317  		var originHash common.Hash
   318  		if len(origin) > 0 {
   319  			originHash = common.BytesToHash(origin)
   320  		}
   321  		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   322  		if len(limit) > 0 {
   323  			limitHash = common.BytesToHash(limit)
   324  		}
   325  		var (
   326  			keys  []common.Hash
   327  			vals  [][]byte
   328  			abort bool
   329  		)
   330  		for _, entry := range t.storageValues[account] {
   331  			if size >= max {
   332  				abort = true
   333  				break
   334  			}
   335  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   336  				continue
   337  			}
   338  			keys = append(keys, common.BytesToHash(entry.k))
   339  			vals = append(vals, entry.v)
   340  			size += uint64(32 + len(entry.v))
   341  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   342  				break
   343  			}
   344  		}
   345  		if len(keys) > 0 {
   346  			hashes = append(hashes, keys)
   347  			slots = append(slots, vals)
   348  		}
   349  		// Generate the Merkle proofs for the first and last storage slot, but
   350  // only if the response was capped. If the entire storage trie is included
   351  // in the response, there is no need for any proofs.
   352  		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
   353  			// If we're aborting, we need to prove the first and last item
   354  			// This terminates the response (and thus the loop)
   355  			proof := light.NewNodeSet()
   356  			stTrie := t.storageTries[account]
   357  
   358  			// Here's a potential gotcha: when constructing the proof, we cannot
   359  			// use the 'origin' slice directly, but must use the full 32-byte
   360  			// hash form.
   361  			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
   362  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   363  			}
   364  			if len(keys) > 0 {
   365  				lastK := (keys[len(keys)-1])[:]
   366  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   367  					t.logger.Error("Could not prove last item", "error", err)
   368  				}
   369  			}
   370  			for _, blob := range proof.NodeList() {
   371  				proofs = append(proofs, blob)
   372  			}
   373  			break
   374  		}
   375  	}
   376  	return hashes, slots, proofs
   377  }
   378  
   379  // createStorageRequestResponseAlwaysProve tests a corner case where the peer always
   380  // supplies the proof for the last account, even if it is 'complete'.
   381  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   382  	var size uint64
   383  	max = max * 3 / 4
   384  
   385  	var origin common.Hash
   386  	if len(bOrigin) > 0 {
   387  		origin = common.BytesToHash(bOrigin)
   388  	}
   389  	var exit bool
   390  	for i, account := range accounts {
   391  		var keys []common.Hash
   392  		var vals [][]byte
   393  		for _, entry := range t.storageValues[account] {
   394  			if bytes.Compare(entry.k, origin[:]) < 0 {
   395  				exit = true
   396  			}
   397  			keys = append(keys, common.BytesToHash(entry.k))
   398  			vals = append(vals, entry.v)
   399  			size += uint64(32 + len(entry.v))
   400  			if size > max {
   401  				exit = true
   402  			}
   403  		}
   404  		if i == len(accounts)-1 {
   405  			exit = true
   406  		}
   407  		hashes = append(hashes, keys)
   408  		slots = append(slots, vals)
   409  
   410  		if exit {
   411  			// If we're aborting, we need to prove the first and last item
   412  			// This terminates the response (and thus the loop)
   413  			proof := light.NewNodeSet()
   414  			stTrie := t.storageTries[account]
   415  
   416  			// Here's a potential gotcha: when constructing the proof, we cannot
   417  			// use the 'origin' slice directly, but must use the full 32-byte
   418  			// hash form.
   419  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   420  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   421  					"error", err)
   422  			}
   423  			if len(keys) > 0 {
   424  				lastK := (keys[len(keys)-1])[:]
   425  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   426  					t.logger.Error("Could not prove last item", "error", err)
   427  				}
   428  			}
   429  			for _, blob := range proof.NodeList() {
   430  				proofs = append(proofs, blob)
   431  			}
   432  			break
   433  		}
   434  	}
   435  	return hashes, slots, proofs
   436  }
   437  
   438  // emptyRequestAccountRangeFn answers AccountRangeRequests with an empty response
   439  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   440  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   441  	return nil
   442  }
   443  
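        // nonResponsiveRequestAccountRangeFn silently drops AccountRangeRequests.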
   444  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   445  	return nil
   446  }
   447  
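        // emptyTrieRequestHandler answers trie node requests with an empty response.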
   448  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   449  	t.remote.OnTrieNodes(t, requestId, nil)
   450  	return nil
   451  }
   452  
   453  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   454  	return nil
   455  }
   456  
   457  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   458  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   459  	return nil
   460  }
   461  
   462  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   463  	return nil
   464  }
   465  
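        // proofHappyStorageRequestHandler attaches proofs to every storage response, even
        // when the delivered trie is complete.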
   466  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   467  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   468  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   469  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   470  		t.term()
   471  	}
   472  	return nil
   473  }
   474  
   475  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   476  //	var bytecodes [][]byte
   477  //	t.remote.OnByteCodes(t, id, bytecodes)
   478  //	return nil
   479  //}
   480  
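        // corruptCodeRequestHandler replies with the requested hashes themselves instead of
        // the corresponding bytecodes.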
   481  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   482  	var bytecodes [][]byte
   483  	for _, h := range hashes {
   484  		// Send back the hashes
   485  		bytecodes = append(bytecodes, h[:])
   486  	}
   487  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   488  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   489  		// Mimic the real-life handler, which drops a peer on errors
   490  		t.remote.Unregister(t.id)
   491  	}
   492  	return nil
   493  }
   494  
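        // cappedCodeRequestHandler only delivers the first requested bytecode, forcing the
        // syncer to re-request the remainder.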
   495  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   496  	var bytecodes [][]byte
   497  	for _, h := range hashes[:1] {
   498  		bytecodes = append(bytecodes, getCodeByHash(h))
   499  	}
   500  	// Missing bytecode can be retrieved again, no error expected
   501  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   502  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   503  		t.term()
   504  	}
   505  	return nil
   506  }
   507  
   508  // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to be very small
   509  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   510  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   511  }
   512  
   513  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   514  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   515  }
   516  
   517  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   518  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   519  //}
   520  
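        // corruptAccountRequestHandler drops the first proof node from an otherwise valid
        // account range response.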
   521  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   522  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   523  	if len(proofs) > 0 {
   524  		proofs = proofs[1:]
   525  	}
   526  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   527  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   528  		// Mimic the real-life handler, which drops a peer on errors
   529  		t.remote.Unregister(t.id)
   530  	}
   531  	return nil
   532  }
   533  
   534  // corruptStorageRequestHandler doesn't provide good proofs
   535  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   536  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   537  	if len(proofs) > 0 {
   538  		proofs = proofs[1:]
   539  	}
   540  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   541  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   542  		// Mimic the real-life handler, which drops a peer on errors
   543  		t.remote.Unregister(t.id)
   544  	}
   545  	return nil
   546  }
   547  
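        // noProofStorageRequestHandler delivers storage ranges without any accompanying proofs.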
   548  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   549  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   550  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   551  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   552  		// Mimic the real-life handler, which drops a peer on errors
   553  		t.remote.Unregister(t.id)
   554  	}
   555  	return nil
   556  }
   557  
   558  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   559  // also ship the entire trie inside the proof. If the attack is successful,
   560  // the remote side does not do any follow-up requests
   561  func TestSyncBloatedProof(t *testing.T) {
   562  	t.Parallel()
   563  
   564  	var (
   565  		once   sync.Once
   566  		cancel = make(chan struct{})
   567  		term   = func() {
   568  			once.Do(func() {
   569  				close(cancel)
   570  			})
   571  		}
   572  	)
   573  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   574  	source := newTestPeer("source", t, term)
   575  	source.accountTrie = sourceAccountTrie.Copy()
   576  	source.accountValues = elems
   577  
   578  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   579  		var (
   580  			proofs [][]byte
   581  			keys   []common.Hash
   582  			vals   [][]byte
   583  		)
   584  		// The values
   585  		for _, entry := range t.accountValues {
   586  			if bytes.Compare(entry.k, origin[:]) < 0 {
   587  				continue
   588  			}
   589  			if bytes.Compare(entry.k, limit[:]) > 0 {
   590  				continue
   591  			}
   592  			keys = append(keys, common.BytesToHash(entry.k))
   593  			vals = append(vals, entry.v)
   594  		}
   595  		// The proofs
   596  		proof := light.NewNodeSet()
   597  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   598  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   599  		}
   600  		// The bloat: add proof of every single element
   601  		for _, entry := range t.accountValues {
   602  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   603  				t.logger.Error("Could not prove item", "error", err)
   604  			}
   605  		}
   606  		// And remove one item from the elements
   607  		if len(keys) > 2 {
   608  			keys = append(keys[:1], keys[2:]...)
   609  			vals = append(vals[:1], vals[2:]...)
   610  		}
   611  		for _, blob := range proof.NodeList() {
   612  			proofs = append(proofs, blob)
   613  		}
   614  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   615  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   616  			t.term()
   617  			// This is actually correct, signal to exit the test successfully
   618  		}
   619  		return nil
   620  	}
   621  	syncer := setupSyncer(nodeScheme, source)
   622  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   623  		t.Fatal("No error returned from incomplete/cancelled sync")
   624  	}
   625  }
   626  
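        // setupSyncer creates a Syncer backed by a fresh in-memory database and registers
        // the given test peers with it.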
   627  func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
   628  	stateDb := rawdb.NewMemoryDatabase()
   629  	syncer := NewSyncer(stateDb, scheme)
   630  	for _, peer := range peers {
   631  		syncer.Register(peer)
   632  		peer.remote = syncer
   633  	}
   634  	return syncer
   635  }
   636  
   637  // TestSync tests a basic sync with one peer
   638  func TestSync(t *testing.T) {
   639  	t.Parallel()
   640  
   641  	var (
   642  		once   sync.Once
   643  		cancel = make(chan struct{})
   644  		term   = func() {
   645  			once.Do(func() {
   646  				close(cancel)
   647  			})
   648  		}
   649  	)
   650  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   651  
   652  	mkSource := func(name string) *testPeer {
   653  		source := newTestPeer(name, t, term)
   654  		source.accountTrie = sourceAccountTrie.Copy()
   655  		source.accountValues = elems
   656  		return source
   657  	}
   658  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   659  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   660  		t.Fatalf("sync failed: %v", err)
   661  	}
   662  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   663  }
   664  
   665  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This previously
   666  // caused a panic within the prover
   667  func TestSyncTinyTriePanic(t *testing.T) {
   668  	t.Parallel()
   669  
   670  	var (
   671  		once   sync.Once
   672  		cancel = make(chan struct{})
   673  		term   = func() {
   674  			once.Do(func() {
   675  				close(cancel)
   676  			})
   677  		}
   678  	)
   679  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   680  
   681  	mkSource := func(name string) *testPeer {
   682  		source := newTestPeer(name, t, term)
   683  		source.accountTrie = sourceAccountTrie.Copy()
   684  		source.accountValues = elems
   685  		return source
   686  	}
   687  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   688  	done := checkStall(t, term)
   689  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   690  		t.Fatalf("sync failed: %v", err)
   691  	}
   692  	close(done)
   693  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   694  }
   695  
   696  // TestMultiSync tests a basic sync with multiple peers
   697  func TestMultiSync(t *testing.T) {
   698  	t.Parallel()
   699  
   700  	var (
   701  		once   sync.Once
   702  		cancel = make(chan struct{})
   703  		term   = func() {
   704  			once.Do(func() {
   705  				close(cancel)
   706  			})
   707  		}
   708  	)
   709  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   710  
   711  	mkSource := func(name string) *testPeer {
   712  		source := newTestPeer(name, t, term)
   713  		source.accountTrie = sourceAccountTrie.Copy()
   714  		source.accountValues = elems
   715  		return source
   716  	}
   717  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
   718  	done := checkStall(t, term)
   719  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   720  		t.Fatalf("sync failed: %v", err)
   721  	}
   722  	close(done)
   723  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   724  }
   725  
   726  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   727  func TestSyncWithStorage(t *testing.T) {
   728  	t.Parallel()
   729  
   730  	var (
   731  		once   sync.Once
   732  		cancel = make(chan struct{})
   733  		term   = func() {
   734  			once.Do(func() {
   735  				close(cancel)
   736  			})
   737  		}
   738  	)
   739  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
   740  
   741  	mkSource := func(name string) *testPeer {
   742  		source := newTestPeer(name, t, term)
   743  		source.accountTrie = sourceAccountTrie.Copy()
   744  		source.accountValues = elems
   745  		source.setStorageTries(storageTries)
   746  		source.storageValues = storageElems
   747  		return source
   748  	}
   749  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
   750  	done := checkStall(t, term)
   751  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   752  		t.Fatalf("sync failed: %v", err)
   753  	}
   754  	close(done)
   755  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   756  }
   757  
   758  // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
   759  func TestMultiSyncManyUseless(t *testing.T) {
   760  	t.Parallel()
   761  
   762  	var (
   763  		once   sync.Once
   764  		cancel = make(chan struct{})
   765  		term   = func() {
   766  			once.Do(func() {
   767  				close(cancel)
   768  			})
   769  		}
   770  	)
   771  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   772  
   773  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   774  		source := newTestPeer(name, t, term)
   775  		source.accountTrie = sourceAccountTrie.Copy()
   776  		source.accountValues = elems
   777  		source.setStorageTries(storageTries)
   778  		source.storageValues = storageElems
   779  
   780  		if !noAccount {
   781  			source.accountRequestHandler = emptyRequestAccountRangeFn
   782  		}
   783  		if !noStorage {
   784  			source.storageRequestHandler = emptyStorageRequestHandler
   785  		}
   786  		if !noTrieNode {
   787  			source.trieRequestHandler = emptyTrieRequestHandler
   788  		}
   789  		return source
   790  	}
   791  
   792  	syncer := setupSyncer(
   793  		nodeScheme,
   794  		mkSource("full", true, true, true),
   795  		mkSource("noAccounts", false, true, true),
   796  		mkSource("noStorage", true, false, true),
   797  		mkSource("noTrie", true, true, false),
   798  	)
   799  	done := checkStall(t, term)
   800  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   801  		t.Fatalf("sync failed: %v", err)
   802  	}
   803  	close(done)
   804  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   805  }
   806  
   807  // TestMultiSyncManyUselessWithLowTimeout is like TestMultiSyncManyUseless, but runs with a very aggressive request timeout
   808  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   809  	var (
   810  		once   sync.Once
   811  		cancel = make(chan struct{})
   812  		term   = func() {
   813  			once.Do(func() {
   814  				close(cancel)
   815  			})
   816  		}
   817  	)
   818  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   819  
   820  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   821  		source := newTestPeer(name, t, term)
   822  		source.accountTrie = sourceAccountTrie.Copy()
   823  		source.accountValues = elems
   824  		source.setStorageTries(storageTries)
   825  		source.storageValues = storageElems
   826  
   827  		if !noAccount {
   828  			source.accountRequestHandler = emptyRequestAccountRangeFn
   829  		}
   830  		if !noStorage {
   831  			source.storageRequestHandler = emptyStorageRequestHandler
   832  		}
   833  		if !noTrieNode {
   834  			source.trieRequestHandler = emptyTrieRequestHandler
   835  		}
   836  		return source
   837  	}
   838  
   839  	syncer := setupSyncer(
   840  		nodeScheme,
   841  		mkSource("full", true, true, true),
   842  		mkSource("noAccounts", false, true, true),
   843  		mkSource("noStorage", true, false, true),
   844  		mkSource("noTrie", true, true, false),
   845  	)
   846  	// We're setting the timeout very low, to increase the chance of the timeout
   847  	// being triggered. This previously caused a panic, when a response arrived
   848  	// at the same time as a timeout was triggered.
   849  	syncer.rates.OverrideTTLLimit = time.Millisecond
   850  
   851  	done := checkStall(t, term)
   852  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   853  		t.Fatalf("sync failed: %v", err)
   854  	}
   855  	close(done)
   856  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   857  }
   858  
   859  // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
   860  func TestMultiSyncManyUnresponsive(t *testing.T) {
   861  	var (
   862  		once   sync.Once
   863  		cancel = make(chan struct{})
   864  		term   = func() {
   865  			once.Do(func() {
   866  				close(cancel)
   867  			})
   868  		}
   869  	)
   870  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   871  
   872  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   873  		source := newTestPeer(name, t, term)
   874  		source.accountTrie = sourceAccountTrie.Copy()
   875  		source.accountValues = elems
   876  		source.setStorageTries(storageTries)
   877  		source.storageValues = storageElems
   878  
   879  		if !noAccount {
   880  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   881  		}
   882  		if !noStorage {
   883  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   884  		}
   885  		if !noTrieNode {
   886  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   887  		}
   888  		return source
   889  	}
   890  
   891  	syncer := setupSyncer(
   892  		nodeScheme,
   893  		mkSource("full", true, true, true),
   894  		mkSource("noAccounts", false, true, true),
   895  		mkSource("noStorage", true, false, true),
   896  		mkSource("noTrie", true, true, false),
   897  	)
   898  	// We're setting the timeout very low, to make the test run a bit faster
   899  	syncer.rates.OverrideTTLLimit = time.Millisecond
   900  
   901  	done := checkStall(t, term)
   902  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   903  		t.Fatalf("sync failed: %v", err)
   904  	}
   905  	close(done)
   906  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   907  }
   908  
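        // checkStall spawns a watchdog which terminates the sync (via term) if it hasn't
        // finished within a minute. Closing the returned channel stops the watchdog.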
   909  func checkStall(t *testing.T, term func()) chan struct{} {
   910  	testDone := make(chan struct{})
   911  	go func() {
   912  		select {
   913  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   914  			t.Log("Sync stalled")
   915  			term()
   916  		case <-testDone:
   917  			return
   918  		}
   919  	}()
   920  	return testDone
   921  }
   922  
   923  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   924  // account trie has a few boundary elements.
   925  func TestSyncBoundaryAccountTrie(t *testing.T) {
   926  	t.Parallel()
   927  
   928  	var (
   929  		once   sync.Once
   930  		cancel = make(chan struct{})
   931  		term   = func() {
   932  			once.Do(func() {
   933  				close(cancel)
   934  			})
   935  		}
   936  	)
   937  	nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
   938  
   939  	mkSource := func(name string) *testPeer {
   940  		source := newTestPeer(name, t, term)
   941  		source.accountTrie = sourceAccountTrie.Copy()
   942  		source.accountValues = elems
   943  		return source
   944  	}
   945  	syncer := setupSyncer(
   946  		nodeScheme,
   947  		mkSource("peer-a"),
   948  		mkSource("peer-b"),
   949  	)
   950  	done := checkStall(t, term)
   951  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   952  		t.Fatalf("sync failed: %v", err)
   953  	}
   954  	close(done)
   955  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   956  }
   957  
   958  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   959  // consistently returning very small results
   960  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   961  	t.Parallel()
   962  
   963  	var (
   964  		once   sync.Once
   965  		cancel = make(chan struct{})
   966  		term   = func() {
   967  			once.Do(func() {
   968  				close(cancel)
   969  			})
   970  		}
   971  	)
   972  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   973  
   974  	mkSource := func(name string, slow bool) *testPeer {
   975  		source := newTestPeer(name, t, term)
   976  		source.accountTrie = sourceAccountTrie.Copy()
   977  		source.accountValues = elems
   978  
   979  		if slow {
   980  			source.accountRequestHandler = starvingAccountRequestHandler
   981  		}
   982  		return source
   983  	}
   984  
   985  	syncer := setupSyncer(
   986  		nodeScheme,
   987  		mkSource("nice-a", false),
   988  		mkSource("nice-b", false),
   989  		mkSource("nice-c", false),
   990  		mkSource("capped", true),
   991  	)
   992  	done := checkStall(t, term)
   993  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   994  		t.Fatalf("sync failed: %v", err)
   995  	}
   996  	close(done)
   997  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   998  }
   999  
  1000  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
  1001  // code requests properly.
  1002  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
  1003  	t.Parallel()
  1004  
  1005  	var (
  1006  		once   sync.Once
  1007  		cancel = make(chan struct{})
  1008  		term   = func() {
  1009  			once.Do(func() {
  1010  				close(cancel)
  1011  			})
  1012  		}
  1013  	)
  1014  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1015  
  1016  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1017  		source := newTestPeer(name, t, term)
  1018  		source.accountTrie = sourceAccountTrie.Copy()
  1019  		source.accountValues = elems
  1020  		source.codeRequestHandler = codeFn
  1021  		return source
  1022  	}
  1023  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1024  	// chance that the full set of codes requested are sent only to the
  1025  	// non-corrupt peer, which delivers everything in one go, and makes the
  1026  	// test moot
  1027  	syncer := setupSyncer(
  1028  		nodeScheme,
  1029  		mkSource("capped", cappedCodeRequestHandler),
  1030  		mkSource("corrupt", corruptCodeRequestHandler),
  1031  	)
  1032  	done := checkStall(t, term)
  1033  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1034  		t.Fatalf("sync failed: %v", err)
  1035  	}
  1036  	close(done)
  1037  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1038  }
  1039  
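        // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which delivers account
        // ranges with invalid proofs.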
  1040  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1041  	t.Parallel()
  1042  
  1043  	var (
  1044  		once   sync.Once
  1045  		cancel = make(chan struct{})
  1046  		term   = func() {
  1047  			once.Do(func() {
  1048  				close(cancel)
  1049  			})
  1050  		}
  1051  	)
  1052  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1053  
  1054  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1055  		source := newTestPeer(name, t, term)
  1056  		source.accountTrie = sourceAccountTrie.Copy()
  1057  		source.accountValues = elems
  1058  		source.accountRequestHandler = accFn
  1059  		return source
  1060  	}
  1061  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1062  	// chance that all the requested accounts are sent only to the
  1063  	// non-corrupt peer, which delivers everything in one go, and makes the
  1064  	// test moot
  1065  	syncer := setupSyncer(
  1066  		nodeScheme,
  1067  		mkSource("capped", defaultAccountRequestHandler),
  1068  		mkSource("corrupt", corruptAccountRequestHandler),
  1069  	)
  1070  	done := checkStall(t, term)
  1071  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1072  		t.Fatalf("sync failed: %v", err)
  1073  	}
  1074  	close(done)
  1075  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1076  }
  1077  
  1078  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
  1079  // one by one
  1080  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1081  	t.Parallel()
  1082  
  1083  	var (
  1084  		once   sync.Once
  1085  		cancel = make(chan struct{})
  1086  		term   = func() {
  1087  			once.Do(func() {
  1088  				close(cancel)
  1089  			})
  1090  		}
  1091  	)
  1092  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1093  
  1094  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1095  		source := newTestPeer(name, t, term)
  1096  		source.accountTrie = sourceAccountTrie.Copy()
  1097  		source.accountValues = elems
  1098  		source.codeRequestHandler = codeFn
  1099  		return source
  1100  	}
  1101  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1102  	// so it shouldn't be more than that
  1103  	var counter int
  1104  	syncer := setupSyncer(
  1105  		nodeScheme,
  1106  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1107  			counter++
  1108  			return cappedCodeRequestHandler(t, id, hashes, max)
  1109  		}),
  1110  	)
  1111  	done := checkStall(t, term)
  1112  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1113  		t.Fatalf("sync failed: %v", err)
  1114  	}
  1115  	close(done)
  1116  
  1117  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1118  	// deduplication is per request batch. If it were a perfect global dedup,
  1119  	// we would expect only 8 requests. If there were no dedup, there would be
  1120  	// 3k requests.
  1121  	// We expect somewhere below 100 requests for these 8 unique hashes. But
  1122  	// the number can be flaky, so don't limit it so strictly.
  1123  	if threshold := 100; counter > threshold {
  1124  		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
  1125  	}
  1126  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1127  }
  1128  
  1129  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1130  // storage trie has a few boundary elements.
  1131  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1132  	t.Parallel()
  1133  
  1134  	var (
  1135  		once   sync.Once
  1136  		cancel = make(chan struct{})
  1137  		term   = func() {
  1138  			once.Do(func() {
  1139  				close(cancel)
  1140  			})
  1141  		}
  1142  	)
  1143  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
  1144  
  1145  	mkSource := func(name string) *testPeer {
  1146  		source := newTestPeer(name, t, term)
  1147  		source.accountTrie = sourceAccountTrie.Copy()
  1148  		source.accountValues = elems
  1149  		source.setStorageTries(storageTries)
  1150  		source.storageValues = storageElems
  1151  		return source
  1152  	}
  1153  	syncer := setupSyncer(
  1154  		nodeScheme,
  1155  		mkSource("peer-a"),
  1156  		mkSource("peer-b"),
  1157  	)
  1158  	done := checkStall(t, term)
  1159  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1160  		t.Fatalf("sync failed: %v", err)
  1161  	}
  1162  	close(done)
  1163  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1164  }
  1165  
  1166  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1167  // consistently returning very small results
  1168  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1169  	t.Parallel()
  1170  
  1171  	var (
  1172  		once   sync.Once
  1173  		cancel = make(chan struct{})
  1174  		term   = func() {
  1175  			once.Do(func() {
  1176  				close(cancel)
  1177  			})
  1178  		}
  1179  	)
  1180  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
  1181  
  1182  	mkSource := func(name string, slow bool) *testPeer {
  1183  		source := newTestPeer(name, t, term)
  1184  		source.accountTrie = sourceAccountTrie.Copy()
  1185  		source.accountValues = elems
  1186  		source.setStorageTries(storageTries)
  1187  		source.storageValues = storageElems
  1188  
  1189  		if slow {
  1190  			source.storageRequestHandler = starvingStorageRequestHandler
  1191  		}
  1192  		return source
  1193  	}
  1194  
  1195  	syncer := setupSyncer(
  1196  		nodeScheme,
  1197  		mkSource("nice-a", false),
  1198  		mkSource("slow", true),
  1199  	)
  1200  	done := checkStall(t, term)
  1201  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1202  		t.Fatalf("sync failed: %v", err)
  1203  	}
  1204  	close(done)
  1205  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1206  }
  1207  
  1208  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1209  // sometimes sending bad proofs
  1210  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1211  	t.Parallel()
  1212  
  1213  	var (
  1214  		once   sync.Once
  1215  		cancel = make(chan struct{})
  1216  		term   = func() {
  1217  			once.Do(func() {
  1218  				close(cancel)
  1219  			})
  1220  		}
  1221  	)
  1222  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1223  
  1224  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1225  		source := newTestPeer(name, t, term)
  1226  		source.accountTrie = sourceAccountTrie.Copy()
  1227  		source.accountValues = elems
  1228  		source.setStorageTries(storageTries)
  1229  		source.storageValues = storageElems
  1230  		source.storageRequestHandler = handler
  1231  		return source
  1232  	}
  1233  
  1234  	syncer := setupSyncer(
  1235  		nodeScheme,
  1236  		mkSource("nice-a", defaultStorageRequestHandler),
  1237  		mkSource("nice-b", defaultStorageRequestHandler),
  1238  		mkSource("nice-c", defaultStorageRequestHandler),
  1239  		mkSource("corrupt", corruptStorageRequestHandler),
  1240  	)
  1241  	done := checkStall(t, term)
  1242  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1243  		t.Fatalf("sync failed: %v", err)
  1244  	}
  1245  	close(done)
  1246  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1247  }
  1248  
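        // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where
        // one peer never attaches proofs to its storage responses.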
  1249  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1250  	t.Parallel()
  1251  
  1252  	var (
  1253  		once   sync.Once
  1254  		cancel = make(chan struct{})
  1255  		term   = func() {
  1256  			once.Do(func() {
  1257  				close(cancel)
  1258  			})
  1259  		}
  1260  	)
  1261  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1262  
  1263  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1264  		source := newTestPeer(name, t, term)
  1265  		source.accountTrie = sourceAccountTrie.Copy()
  1266  		source.accountValues = elems
  1267  		source.setStorageTries(storageTries)
  1268  		source.storageValues = storageElems
  1269  		source.storageRequestHandler = handler
  1270  		return source
  1271  	}
  1272  	syncer := setupSyncer(
  1273  		nodeScheme,
  1274  		mkSource("nice-a", defaultStorageRequestHandler),
  1275  		mkSource("nice-b", defaultStorageRequestHandler),
  1276  		mkSource("nice-c", defaultStorageRequestHandler),
  1277  		mkSource("corrupt", noProofStorageRequestHandler),
  1278  	)
  1279  	done := checkStall(t, term)
  1280  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1281  		t.Fatalf("sync failed: %v", err)
  1282  	}
  1283  	close(done)
  1284  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1285  }
  1286  
  1287  // TestSyncWithStorageMisbehavingProve tests a basic sync using accounts + storage + code, against
  1288  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1289  // an error, where the recipient erroneously clipped the boundary nodes, but
  1290  // did not mark the account for healing.
  1291  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1292  	t.Parallel()
  1293  	var (
  1294  		once   sync.Once
  1295  		cancel = make(chan struct{})
  1296  		term   = func() {
  1297  			once.Do(func() {
  1298  				close(cancel)
  1299  			})
  1300  		}
  1301  	)
  1302  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
  1303  
  1304  	mkSource := func(name string) *testPeer {
  1305  		source := newTestPeer(name, t, term)
  1306  		source.accountTrie = sourceAccountTrie.Copy()
  1307  		source.accountValues = elems
  1308  		source.setStorageTries(storageTries)
  1309  		source.storageValues = storageElems
  1310  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1311  		return source
  1312  	}
  1313  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
  1314  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1315  		t.Fatalf("sync failed: %v", err)
  1316  	}
  1317  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1318  }
  1319  
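        // kv is a raw key/value pair representing a single trie leaf.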
  1320  type kv struct {
  1321  	k, v []byte
  1322  }
  1323  
  1324  // Some helpers for sorting
  1325  type entrySlice []*kv
  1326  
  1327  func (p entrySlice) Len() int           { return len(p) }
  1328  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
  1329  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
  1330  
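        // key32 returns a 32-byte key with the counter i encoded little-endian in its
        // first eight bytes.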
  1331  func key32(i uint64) []byte {
  1332  	key := make([]byte, 32)
  1333  	binary.LittleEndian.PutUint64(key, i)
  1334  	return key
  1335  }
  1336  
  1337  var (
  1338  	codehashes = []common.Hash{
  1339  		crypto.Keccak256Hash([]byte{0}),
  1340  		crypto.Keccak256Hash([]byte{1}),
  1341  		crypto.Keccak256Hash([]byte{2}),
  1342  		crypto.Keccak256Hash([]byte{3}),
  1343  		crypto.Keccak256Hash([]byte{4}),
  1344  		crypto.Keccak256Hash([]byte{5}),
  1345  		crypto.Keccak256Hash([]byte{6}),
  1346  		crypto.Keccak256Hash([]byte{7}),
  1347  	}
  1348  )
  1349  
  1350  // getCodeHash returns a pseudo-random code hash
  1351  func getCodeHash(i uint64) []byte {
  1352  	h := codehashes[int(i)%len(codehashes)]
  1353  	return common.CopyBytes(h[:])
  1354  }
  1355  
  1356  // getCodeByHash is a convenience function to look up the code from the code hash
  1357  func getCodeByHash(hash common.Hash) []byte {
  1358  	if hash == types.EmptyCodeHash {
  1359  		return nil
  1360  	}
  1361  	for i, h := range codehashes {
  1362  		if h == hash {
  1363  			return []byte{byte(i)}
  1364  		}
  1365  	}
  1366  	return nil
  1367  }
  1368  
  1369  // makeAccountTrieNoStorage spits out a trie, along with the leaves
  1370  func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
  1371  	var (
  1372  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1373  		accTrie = trie.NewEmpty(db)
  1374  		entries entrySlice
  1375  	)
  1376  	for i := uint64(1); i <= uint64(n); i++ {
  1377  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1378  			Nonce:    i,
  1379  			Balance:  big.NewInt(int64(i)),
  1380  			Root:     types.EmptyRootHash,
  1381  			CodeHash: getCodeHash(i),
  1382  		})
  1383  		key := key32(i)
  1384  		elem := &kv{key, value}
  1385  		accTrie.MustUpdate(elem.k, elem.v)
  1386  		entries = append(entries, elem)
  1387  	}
  1388  	sort.Sort(entries)
  1389  
  1390  	// Commit the state changes into db and re-create the trie
  1391  	// for accessing later.
  1392  	root, nodes := accTrie.Commit(false)
  1393  	db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
  1394  
  1395  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1396  	return db.Scheme(), accTrie, entries
  1397  }
  1398  
  1399  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1400  // accounts normally, this function will fill a few accounts which have
  1401  // boundary hashes.
  1402  func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
  1403  	var (
  1404  		entries    entrySlice
  1405  		boundaries []common.Hash
  1406  
  1407  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1408  		accTrie = trie.NewEmpty(db)
  1409  	)
  1410  	// Initialize boundaries
  1411  	var next common.Hash
  1412  	step := new(big.Int).Sub(
  1413  		new(big.Int).Div(
  1414  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1415  			big.NewInt(int64(accountConcurrency)),
  1416  		), common.Big1,
  1417  	)
  1418  	for i := 0; i < accountConcurrency; i++ {
  1419  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1420  		if i == accountConcurrency-1 {
  1421  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1422  		}
  1423  		boundaries = append(boundaries, last)
  1424  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1425  	}
  1426  	// Fill boundary accounts
  1427  	for i := 0; i < len(boundaries); i++ {
  1428  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1429  			Nonce:    uint64(0),
  1430  			Balance:  big.NewInt(int64(i)),
  1431  			Root:     types.EmptyRootHash,
  1432  			CodeHash: getCodeHash(uint64(i)),
  1433  		})
  1434  		elem := &kv{boundaries[i].Bytes(), value}
  1435  		accTrie.MustUpdate(elem.k, elem.v)
  1436  		entries = append(entries, elem)
  1437  	}
  1438  	// Fill other accounts if required
  1439  	for i := uint64(1); i <= uint64(n); i++ {
  1440  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1441  			Nonce:    i,
  1442  			Balance:  big.NewInt(int64(i)),
  1443  			Root:     types.EmptyRootHash,
  1444  			CodeHash: getCodeHash(i),
  1445  		})
  1446  		elem := &kv{key32(i), value}
  1447  		accTrie.MustUpdate(elem.k, elem.v)
  1448  		entries = append(entries, elem)
  1449  	}
  1450  	sort.Sort(entries)
  1451  
  1452  	// Commit the state changes into db and re-create the trie
  1453  	// for accessing later.
  1454  	root, nodes := accTrie.Commit(false)
  1455  	db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
  1456  
  1457  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1458  	return db.Scheme(), accTrie, entries
  1459  }
  1460  
  1461  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where
  1462  // each account has a unique storage set.
  1463  func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1464  	var (
  1465  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1466  		accTrie        = trie.NewEmpty(db)
  1467  		entries        entrySlice
  1468  		storageRoots   = make(map[common.Hash]common.Hash)
  1469  		storageTries   = make(map[common.Hash]*trie.Trie)
  1470  		storageEntries = make(map[common.Hash]entrySlice)
  1471  		nodes          = trienode.NewMergedNodeSet()
  1472  	)
  1473  	// Create n accounts in the trie
  1474  	for i := uint64(1); i <= uint64(accounts); i++ {
  1475  		key := key32(i)
  1476  		codehash := types.EmptyCodeHash.Bytes()
  1477  		if code {
  1478  			codehash = getCodeHash(i)
  1479  		}
  1480  		// Create a storage trie
  1481  		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
  1482  		nodes.Merge(stNodes)
  1483  
  1484  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1485  			Nonce:    i,
  1486  			Balance:  big.NewInt(int64(i)),
  1487  			Root:     stRoot,
  1488  			CodeHash: codehash,
  1489  		})
  1490  		elem := &kv{key, value}
  1491  		accTrie.MustUpdate(elem.k, elem.v)
  1492  		entries = append(entries, elem)
  1493  
  1494  		storageRoots[common.BytesToHash(key)] = stRoot
  1495  		storageEntries[common.BytesToHash(key)] = stEntries
  1496  	}
  1497  	sort.Sort(entries)
  1498  
  1499  	// Commit account trie
  1500  	root, set := accTrie.Commit(true)
  1501  	nodes.Merge(set)
  1502  
  1503  	// Commit gathered dirty nodes into database
  1504  	db.Update(root, types.EmptyRootHash, nodes)
  1505  
  1506  	// Re-create tries with new root
  1507  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1508  	for i := uint64(1); i <= uint64(accounts); i++ {
  1509  		key := key32(i)
  1510  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1511  		trie, _ := trie.New(id, db)
  1512  		storageTries[common.BytesToHash(key)] = trie
  1513  	}
  1514  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1515  }
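
// Hedged illustration, not part of the original suite: because every account i
// above seeds its storage trie with i, no two accounts should end up with the
// same storage root. exampleUniqueStorageRoots is a hypothetical, unused helper.
func exampleUniqueStorageRoots() bool {
	_, _, _, storageTries, _ := makeAccountTrieWithStorageWithUniqueStorage(3, 5, false)

	seen := make(map[common.Hash]bool)
	for _, stTrie := range storageTries {
		if seen[stTrie.Hash()] {
			return false // two accounts unexpectedly share a storage root
		}
		seen[stTrie.Hash()] = true
	}
	return true
}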
  1516  
  1517  // makeAccountTrieWithStorage spits out an account trie with a storage trie per account, along with the sorted leaves.
  1518  func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1519  	var (
  1520  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1521  		accTrie        = trie.NewEmpty(db)
  1522  		entries        entrySlice
  1523  		storageRoots   = make(map[common.Hash]common.Hash)
  1524  		storageTries   = make(map[common.Hash]*trie.Trie)
  1525  		storageEntries = make(map[common.Hash]entrySlice)
  1526  		nodes          = trienode.NewMergedNodeSet()
  1527  	)
  1528  	// Create n accounts in the trie
  1529  	for i := uint64(1); i <= uint64(accounts); i++ {
  1530  		key := key32(i)
  1531  		codehash := types.EmptyCodeHash.Bytes()
  1532  		if code {
  1533  			codehash = getCodeHash(i)
  1534  		}
  1535  		// Make a storage trie
  1536  		var (
  1537  			stRoot    common.Hash
  1538  			stNodes   *trienode.NodeSet
  1539  			stEntries entrySlice
  1540  		)
  1541  		if boundary {
  1542  			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
  1543  		} else {
  1544  			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
  1545  		}
  1546  		nodes.Merge(stNodes)
  1547  
  1548  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1549  			Nonce:    i,
  1550  			Balance:  big.NewInt(int64(i)),
  1551  			Root:     stRoot,
  1552  			CodeHash: codehash,
  1553  		})
  1554  		elem := &kv{key, value}
  1555  		accTrie.MustUpdate(elem.k, elem.v)
  1556  		entries = append(entries, elem)
  1557  
  1558  		// every account is filled with the same slots, so the storage root is reused for all of them
  1559  		storageRoots[common.BytesToHash(key)] = stRoot
  1560  		storageEntries[common.BytesToHash(key)] = stEntries
  1561  	}
  1562  	sort.Sort(entries)
  1563  
  1564  	// Commit account trie
  1565  	root, set := accTrie.Commit(true)
  1566  	nodes.Merge(set)
  1567  
  1568  	// Commit gathered dirty nodes into database
  1569  	db.Update(root, types.EmptyRootHash, nodes)
  1570  
  1571  	// Re-create tries with new root
  1572  	accTrie, err := trie.New(trie.StateTrieID(root), db)
  1573  	if err != nil {
  1574  		panic(err)
  1575  	}
  1576  	for i := uint64(1); i <= uint64(accounts); i++ {
  1577  		key := key32(i)
  1578  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1579  		trie, err := trie.New(id, db)
  1580  		if err != nil {
  1581  			panic(err)
  1582  		}
  1583  		storageTries[common.BytesToHash(key)] = trie
  1584  	}
  1585  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1586  }
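
// Hedged illustration, not part of the original suite: in contrast to the
// unique-storage variant above, makeAccountTrieWithStorage fills every account
// from the same seed, so all accounts end up sharing one storage root (see the
// comment inside the loop). exampleSharedStorageRoot is a hypothetical helper.
func exampleSharedStorageRoot() bool {
	_, _, entries, storageTries, _ := makeAccountTrieWithStorage(3, 5, false, false)

	first := storageTries[common.BytesToHash(entries[0].k)].Hash()
	for _, stTrie := range storageTries {
		if stTrie.Hash() != first {
			return false
		}
	}
	return true
}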
  1587  
  1588  // makeStorageTrieWithSeed fills a storage trie with n items, returning the
  1589  // committed root, the dirty node set and the sorted entries. The seed can be
  1590  // used to ensure that tries are unique.
  1591  func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) {
  1592  	trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1593  	var entries entrySlice
  1594  	for i := uint64(1); i <= n; i++ {
  1595  		// store 'x' at slot 'x'
  1596  		slotValue := key32(i + seed)
  1597  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1598  
  1599  		slotKey := key32(i)
  1600  		key := crypto.Keccak256Hash(slotKey[:])
  1601  
  1602  		elem := &kv{key[:], rlpSlotValue}
  1603  		trie.MustUpdate(elem.k, elem.v)
  1604  		entries = append(entries, elem)
  1605  	}
  1606  	sort.Sort(entries)
  1607  	root, nodes := trie.Commit(false)
  1608  	return root, nodes, entries
  1609  }
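
// Hedged illustration, not part of the original suite, of the value encoding
// used above: each slot value is simply the RLP of the zero-trimmed 32-byte
// preimage key32(i+seed), so it round-trips through rlp.DecodeBytes.
// exampleDecodeSlotValue is a hypothetical, unused helper.
func exampleDecodeSlotValue(i, seed uint64) bool {
	preimage := key32(i + seed)
	encoded, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(preimage[:]))

	var decoded []byte
	if err := rlp.DecodeBytes(encoded, &decoded); err != nil {
		return false
	}
	return bytes.Equal(decoded, common.TrimLeftZeroes(preimage[:]))
}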
  1610  
  1611  // makeBoundaryStorageTrie constructs a storage trie. In addition to filling
  1612  // storage slots normally, this function also fills in a few slots whose keys
  1613  // land exactly on the sync-chunk boundaries.
  1614  func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) {
  1615  	var (
  1616  		entries    entrySlice
  1617  		boundaries []common.Hash
  1618  		trie, _    = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1619  	)
  1620  	// Initialize boundaries
  1621  	var next common.Hash
  1622  	step := new(big.Int).Sub(
  1623  		new(big.Int).Div(
  1624  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1625  			big.NewInt(int64(accountConcurrency)),
  1626  		), common.Big1,
  1627  	)
  1628  	for i := 0; i < accountConcurrency; i++ {
  1629  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1630  		if i == accountConcurrency-1 {
  1631  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1632  		}
  1633  		boundaries = append(boundaries, last)
  1634  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1635  	}
  1636  	// Fill boundary slots
  1637  	for i := 0; i < len(boundaries); i++ {
  1638  		key := boundaries[i]
  1639  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1640  
  1641  		elem := &kv{key[:], val}
  1642  		trie.MustUpdate(elem.k, elem.v)
  1643  		entries = append(entries, elem)
  1644  	}
  1645  	// Fill other slots if required
  1646  	for i := uint64(1); i <= uint64(n); i++ {
  1647  		slotKey := key32(i)
  1648  		key := crypto.Keccak256Hash(slotKey[:])
  1649  
  1650  		slotValue := key32(i)
  1651  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1652  
  1653  		elem := &kv{key[:], rlpSlotValue}
  1654  		trie.MustUpdate(elem.k, elem.v)
  1655  		entries = append(entries, elem)
  1656  	}
  1657  	sort.Sort(entries)
  1658  	root, nodes := trie.Commit(false)
  1659  	return root, nodes, entries
  1660  }
  1661  
  1662  func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1663  	t.Helper()
  1664  	triedb := trie.NewDatabase(rawdb.NewDatabase(db))
  1665  	accTrie, err := trie.New(trie.StateTrieID(root), triedb)
  1666  	if err != nil {
  1667  		t.Fatal(err)
  1668  	}
  1669  	accounts, slots := 0, 0
  1670  	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
  1671  	for accIt.Next() {
  1672  		var acc struct {
  1673  			Nonce    uint64
  1674  			Balance  *big.Int
  1675  			Root     common.Hash
  1676  			CodeHash []byte
  1677  		}
  1678  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1679  			log.Crit("Invalid account encountered during snapshot creation", "err", err)
  1680  		}
  1681  		accounts++
  1682  		if acc.Root != types.EmptyRootHash {
  1683  			id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
  1684  			storeTrie, err := trie.NewStateTrie(id, triedb)
  1685  			if err != nil {
  1686  				t.Fatal(err)
  1687  			}
  1688  			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
  1689  			for storeIt.Next() {
  1690  				slots++
  1691  			}
  1692  			if err := storeIt.Err; err != nil {
  1693  				t.Fatal(err)
  1694  			}
  1695  		}
  1696  	}
  1697  	if err := accIt.Err; err != nil {
  1698  		t.Fatal(err)
  1699  	}
  1700  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1701  }
  1702  
  1703  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1704  // minimizing state healing.
  1705  func TestSyncAccountPerformance(t *testing.T) {
  1706  	// Set the account concurrency to 1. This _should_ result in the range
  1707  	// root becoming correct, so there should be no healing needed.
  1708  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1709  	accountConcurrency = 1
  1710  
  1711  	var (
  1712  		once   sync.Once
  1713  		cancel = make(chan struct{})
  1714  		term   = func() {
  1715  			once.Do(func() {
  1716  				close(cancel)
  1717  			})
  1718  		}
  1719  	)
  1720  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
  1721  
  1722  	mkSource := func(name string) *testPeer {
  1723  		source := newTestPeer(name, t, term)
  1724  		source.accountTrie = sourceAccountTrie.Copy()
  1725  		source.accountValues = elems
  1726  		return source
  1727  	}
  1728  	src := mkSource("source")
  1729  	syncer := setupSyncer(nodeScheme, src)
  1730  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1731  		t.Fatalf("sync failed: %v", err)
  1732  	}
  1733  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1734  	// The trie root will always be requested, since it is added when the snap
  1735  	// sync cycle starts. When popping the queue, we do not look it up again.
  1736  	// Doing so would bring this number down to zero in this artificial testcase,
  1737  	// but only add extra IO for no reason in practice.
  1738  	if have, want := src.nTrienodeRequests, 1; have != want {
  1739  		fmt.Print(src.Stats())
  1740  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1741  	}
  1742  }
  1743  
  1744  func TestSlotEstimation(t *testing.T) {
  1745  	for i, tc := range []struct {
  1746  		last  common.Hash
  1747  		count int
  1748  		want  uint64
  1749  	}{
  1750  		{
  1751  			// Half the space
  1752  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1753  			100,
  1754  			100,
  1755  		},
  1756  		{
  1757  			// 1 / 16th
  1758  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1759  			100,
  1760  			1500,
  1761  		},
  1762  		{
  1763  			// Bit more than 1 / 16th
  1764  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1765  			100,
  1766  			1499,
  1767  		},
  1768  		{
  1769  			// Almost everything
  1770  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1771  			100,
  1772  			6,
  1773  		},
  1774  		{
  1775  			// Almost nothing -- should lead to error
  1776  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1777  			1,
  1778  			0,
  1779  		},
  1780  		{
  1781  			// Nothing -- should lead to error
  1782  			common.Hash{},
  1783  			100,
  1784  			0,
  1785  		},
  1786  	} {
  1787  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1788  		if want := tc.want; have != want {
  1789  			t.Errorf("test %d: have %d want %d", i, have, want)
  1790  		}
  1791  	}
  1792  }
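
// To make the expectations above concrete, a hedged reading of the estimator
// (assuming estimateRemainingSlots simply extrapolates the observed slot density
// across the untraversed hash space): with `count` slots found up to `last`, the
// projected total is count * 2^256 / (last+1) and the remainder is that total
// minus count. Half the space: 100*2 - 100 = 100; a 1/16th slice: 100*16 - 100
// = 1500; almost everything (15/16 covered): floor(100*16/15) - 100 = 6. The two
// error rows in the table are not modelled by this sketch.
// exampleEstimateRemaining is a hypothetical helper, not the real implementation.
func exampleEstimateRemaining(count int, last common.Hash) uint64 {
	space := new(big.Int).Exp(common.Big2, common.Big256, nil)
	covered := new(big.Int).Add(last.Big(), common.Big1)
	total := new(big.Int).Div(new(big.Int).Mul(big.NewInt(int64(count)), space), covered)
	return new(big.Int).Sub(total, big.NewInt(int64(count))).Uint64()
}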