github.com/ethereum/go-ethereum@v1.14.3/eth/protocols/snap/sync_test.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"slices"
    27  	"sync"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/core/rawdb"
    33  	"github.com/ethereum/go-ethereum/core/types"
    34  	"github.com/ethereum/go-ethereum/crypto"
    35  	"github.com/ethereum/go-ethereum/ethdb"
    36  	"github.com/ethereum/go-ethereum/internal/testrand"
    37  	"github.com/ethereum/go-ethereum/log"
    38  	"github.com/ethereum/go-ethereum/rlp"
    39  	"github.com/ethereum/go-ethereum/trie"
    40  	"github.com/ethereum/go-ethereum/trie/trienode"
    41  	"github.com/ethereum/go-ethereum/triedb"
    42  	"github.com/ethereum/go-ethereum/triedb/pathdb"
    43  	"github.com/holiman/uint256"
    44  	"golang.org/x/crypto/sha3"
    45  )
    46  
    47  func TestHashing(t *testing.T) {
    48  	t.Parallel()
    49  
    50  	var bytecodes = make([][]byte, 10)
    51  	for i := 0; i < len(bytecodes); i++ {
    52  		buf := make([]byte, 100)
    53  		rand.Read(buf)
    54  		bytecodes[i] = buf
    55  	}
    56  	var want, got string
    57  	var old = func() {
    58  		hasher := sha3.NewLegacyKeccak256()
    59  		for i := 0; i < len(bytecodes); i++ {
    60  			hasher.Reset()
    61  			hasher.Write(bytecodes[i])
    62  			hash := hasher.Sum(nil)
    63  			got = fmt.Sprintf("%v\n%v", got, hash)
    64  		}
    65  	}
    66  	var new = func() {
    67  		hasher := crypto.NewKeccakState()
    68  		var hash = make([]byte, 32)
    69  		for i := 0; i < len(bytecodes); i++ {
    70  			hasher.Reset()
    71  			hasher.Write(bytecodes[i])
    72  			hasher.Read(hash)
    73  			want = fmt.Sprintf("%v\n%v", want, hash)
    74  		}
    75  	}
    76  	old()
    77  	new()
    78  	if want != got {
    79  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    80  	}
    81  }
    82  
    83  func BenchmarkHashing(b *testing.B) {
    84  	var bytecodes = make([][]byte, 10000)
    85  	for i := 0; i < len(bytecodes); i++ {
    86  		buf := make([]byte, 100)
    87  		rand.Read(buf)
    88  		bytecodes[i] = buf
    89  	}
    90  	var old = func() {
    91  		hasher := sha3.NewLegacyKeccak256()
    92  		for i := 0; i < len(bytecodes); i++ {
    93  			hasher.Reset()
    94  			hasher.Write(bytecodes[i])
    95  			hasher.Sum(nil)
    96  		}
    97  	}
    98  	var new = func() {
    99  		hasher := crypto.NewKeccakState()
   100  		var hash = make([]byte, 32)
   101  		for i := 0; i < len(bytecodes); i++ {
   102  			hasher.Reset()
   103  			hasher.Write(bytecodes[i])
   104  			hasher.Read(hash)
   105  		}
   106  	}
   107  	b.Run("old", func(b *testing.B) {
   108  		b.ReportAllocs()
   109  		for i := 0; i < b.N; i++ {
   110  			old()
   111  		}
   112  	})
   113  	b.Run("new", func(b *testing.B) {
   114  		b.ReportAllocs()
   115  		for i := 0; i < b.N; i++ {
   116  			new()
   117  		}
   118  	})
   119  }
   120  
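        // Handler function types that individual tests plug into a testPeer to
        // customise how each kind of snap request is answered.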
   121  type (
   122  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   123  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   124  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   125  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   126  )
   127  
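        // testPeer is an in-memory mock of a remote snap peer: it serves account,
        // storage, trie-node and bytecode requests from local tries through the
        // pluggable handlers below, so tests can emulate well-behaved, slow,
        // corrupt or unresponsive remotes.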
   128  type testPeer struct {
   129  	id            string
   130  	test          *testing.T
   131  	remote        *Syncer
   132  	logger        log.Logger
   133  	accountTrie   *trie.Trie
   134  	accountValues []*kv
   135  	storageTries  map[common.Hash]*trie.Trie
   136  	storageValues map[common.Hash][]*kv
   137  
   138  	accountRequestHandler accountHandlerFunc
   139  	storageRequestHandler storageHandlerFunc
   140  	trieRequestHandler    trieHandlerFunc
   141  	codeRequestHandler    codeHandlerFunc
   142  	term                  func()
   143  
   144  	// counters
   145  	nAccountRequests  int
   146  	nStorageRequests  int
   147  	nBytecodeRequests int
   148  	nTrienodeRequests int
   149  }
   150  
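        // newTestPeer creates a test peer preloaded with the default well-behaved
        // request handlers; tests swap in faulty handlers to exercise error paths.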
   151  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   152  	peer := &testPeer{
   153  		id:                    id,
   154  		test:                  t,
   155  		logger:                log.New("id", id),
   156  		accountRequestHandler: defaultAccountRequestHandler,
   157  		trieRequestHandler:    defaultTrieRequestHandler,
   158  		storageRequestHandler: defaultStorageRequestHandler,
   159  		codeRequestHandler:    defaultCodeRequestHandler,
   160  		term:                  term,
   161  	}
   162  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   163  	//peer.logger.SetHandler(stderrHandler)
   164  	return peer
   165  }
   166  
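        // setStorageTries installs independent copies of the given storage tries
        // on the peer, leaving the shared originals untouched.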
   167  func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
   168  	t.storageTries = make(map[common.Hash]*trie.Trie)
   169  	for root, trie := range tries {
   170  		t.storageTries[root] = trie.Copy()
   171  	}
   172  }
   173  
   174  func (t *testPeer) ID() string      { return t.id }
   175  func (t *testPeer) Log() log.Logger { return t.logger }
   176  
   177  func (t *testPeer) Stats() string {
   178  	return fmt.Sprintf(`Account requests: %d
   179  Storage requests: %d
   180  Bytecode requests: %d
   181  Trienode requests: %d
   182  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   183  }
   184  
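        // The Request* methods below implement the peer interface expected by the
        // Syncer: each one bumps the matching request counter and serves the request
        // asynchronously through the peer's pluggable handler, mimicking network delivery.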
   185  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   186  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   187  	t.nAccountRequests++
   188  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   189  	return nil
   190  }
   191  
   192  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   193  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   194  	t.nTrienodeRequests++
   195  	go t.trieRequestHandler(t, id, root, paths, bytes)
   196  	return nil
   197  }
   198  
   199  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   200  	t.nStorageRequests++
   201  	if len(accounts) == 1 && origin != nil {
   202  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   203  	} else {
   204  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   205  	}
   206  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   207  	return nil
   208  }
   209  
   210  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   211  	t.nBytecodeRequests++
   212  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   213  	go t.codeRequestHandler(t, id, hashes, bytes)
   214  	return nil
   215  }
   216  
    217  // defaultTrieRequestHandler is a well-behaved handler for trie healing requests
   218  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   219  	// Pass the response
   220  	var nodes [][]byte
   221  	for _, pathset := range paths {
   222  		switch len(pathset) {
   223  		case 1:
   224  			blob, _, err := t.accountTrie.GetNode(pathset[0])
   225  			if err != nil {
   226  				t.logger.Info("Error handling req", "error", err)
   227  				break
   228  			}
   229  			nodes = append(nodes, blob)
   230  		default:
   231  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   232  			for _, path := range pathset[1:] {
   233  				blob, _, err := account.GetNode(path)
   234  				if err != nil {
   235  					t.logger.Info("Error handling req", "error", err)
   236  					break
   237  				}
   238  				nodes = append(nodes, blob)
   239  			}
   240  		}
   241  	}
   242  	t.remote.OnTrieNodes(t, requestId, nodes)
   243  	return nil
   244  }
   245  
    246  // defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
   247  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   248  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   249  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   250  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   251  		t.term()
   252  		return err
   253  	}
   254  	return nil
   255  }
   256  
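        // createAccountRequestResponse collects accounts at or after origin until the
        // size cap or the limit is reached, and attaches Merkle proofs for the origin
        // and the last returned key.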
   257  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   258  	var size uint64
   259  	if limit == (common.Hash{}) {
   260  		limit = common.MaxHash
   261  	}
   262  	for _, entry := range t.accountValues {
   263  		if size > cap {
   264  			break
   265  		}
   266  		if bytes.Compare(origin[:], entry.k) <= 0 {
   267  			keys = append(keys, common.BytesToHash(entry.k))
   268  			vals = append(vals, entry.v)
   269  			size += uint64(32 + len(entry.v))
   270  		}
   271  		// If we've exceeded the request threshold, abort
   272  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   273  			break
   274  		}
   275  	}
   276  	// Unless we send the entire trie, we need to supply proofs
   277  	// Actually, we need to supply proofs either way! This seems to be an implementation
   278  	// quirk in go-ethereum
   279  	proof := trienode.NewProofSet()
   280  	if err := t.accountTrie.Prove(origin[:], proof); err != nil {
   281  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   282  	}
   283  	if len(keys) > 0 {
   284  		lastK := (keys[len(keys)-1])[:]
   285  		if err := t.accountTrie.Prove(lastK, proof); err != nil {
   286  			t.logger.Error("Could not prove last item", "error", err)
   287  		}
   288  	}
   289  	for _, blob := range proof.List() {
   290  		proofs = append(proofs, blob)
   291  	}
   292  	return keys, vals, proofs
   293  }
   294  
    295  // defaultStorageRequestHandler is a well-behaved storage request handler
   296  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   297  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   298  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   299  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   300  		t.term()
   301  	}
   302  	return nil
   303  }
   304  
   305  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   306  	var bytecodes [][]byte
   307  	for _, h := range hashes {
   308  		bytecodes = append(bytecodes, getCodeByHash(h))
   309  	}
   310  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   311  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   312  		t.term()
   313  	}
   314  	return nil
   315  }
   316  
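        // createStorageRequestResponse gathers storage slots for the requested accounts
        // within the origin/limit window, stopping at the size cap, and attaches boundary
        // proofs whenever the returned range is partial (non-zero origin or a capped response).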
   317  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   318  	var size uint64
   319  	for _, account := range accounts {
   320  		// The first account might start from a different origin and end sooner
   321  		var originHash common.Hash
   322  		if len(origin) > 0 {
   323  			originHash = common.BytesToHash(origin)
   324  		}
   325  		var limitHash = common.MaxHash
   326  		if len(limit) > 0 {
   327  			limitHash = common.BytesToHash(limit)
   328  		}
   329  		var (
   330  			keys  []common.Hash
   331  			vals  [][]byte
   332  			abort bool
   333  		)
   334  		for _, entry := range t.storageValues[account] {
   335  			if size >= max {
   336  				abort = true
   337  				break
   338  			}
   339  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   340  				continue
   341  			}
   342  			keys = append(keys, common.BytesToHash(entry.k))
   343  			vals = append(vals, entry.v)
   344  			size += uint64(32 + len(entry.v))
   345  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   346  				break
   347  			}
   348  		}
   349  		if len(keys) > 0 {
   350  			hashes = append(hashes, keys)
   351  			slots = append(slots, vals)
   352  		}
   353  		// Generate the Merkle proofs for the first and last storage slot, but
    354  		// only if the response was capped. If the entire storage trie is included
    355  		// in the response, no proofs are needed.
   356  		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
   357  			// If we're aborting, we need to prove the first and last item
   358  			// This terminates the response (and thus the loop)
   359  			proof := trienode.NewProofSet()
   360  			stTrie := t.storageTries[account]
   361  
   362  			// Here's a potential gotcha: when constructing the proof, we cannot
   363  			// use the 'origin' slice directly, but must use the full 32-byte
   364  			// hash form.
   365  			if err := stTrie.Prove(originHash[:], proof); err != nil {
   366  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   367  			}
   368  			if len(keys) > 0 {
   369  				lastK := (keys[len(keys)-1])[:]
   370  				if err := stTrie.Prove(lastK, proof); err != nil {
   371  					t.logger.Error("Could not prove last item", "error", err)
   372  				}
   373  			}
   374  			for _, blob := range proof.List() {
   375  				proofs = append(proofs, blob)
   376  			}
   377  			break
   378  		}
   379  	}
   380  	return hashes, slots, proofs
   381  }
   382  
    383  // createStorageRequestResponseAlwaysProve tests a corner case where the peer always
   384  // supplies the proof for the last account, even if it is 'complete'.
   385  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   386  	var size uint64
   387  	max = max * 3 / 4
   388  
   389  	var origin common.Hash
   390  	if len(bOrigin) > 0 {
   391  		origin = common.BytesToHash(bOrigin)
   392  	}
   393  	var exit bool
   394  	for i, account := range accounts {
   395  		var keys []common.Hash
   396  		var vals [][]byte
   397  		for _, entry := range t.storageValues[account] {
   398  			if bytes.Compare(entry.k, origin[:]) < 0 {
   399  				exit = true
   400  			}
   401  			keys = append(keys, common.BytesToHash(entry.k))
   402  			vals = append(vals, entry.v)
   403  			size += uint64(32 + len(entry.v))
   404  			if size > max {
   405  				exit = true
   406  			}
   407  		}
   408  		if i == len(accounts)-1 {
   409  			exit = true
   410  		}
   411  		hashes = append(hashes, keys)
   412  		slots = append(slots, vals)
   413  
   414  		if exit {
   415  			// If we're aborting, we need to prove the first and last item
   416  			// This terminates the response (and thus the loop)
   417  			proof := trienode.NewProofSet()
   418  			stTrie := t.storageTries[account]
   419  
   420  			// Here's a potential gotcha: when constructing the proof, we cannot
   421  			// use the 'origin' slice directly, but must use the full 32-byte
   422  			// hash form.
   423  			if err := stTrie.Prove(origin[:], proof); err != nil {
   424  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   425  					"error", err)
   426  			}
   427  			if len(keys) > 0 {
   428  				lastK := (keys[len(keys)-1])[:]
   429  				if err := stTrie.Prove(lastK, proof); err != nil {
   430  					t.logger.Error("Could not prove last item", "error", err)
   431  				}
   432  			}
   433  			for _, blob := range proof.List() {
   434  				proofs = append(proofs, blob)
   435  			}
   436  			break
   437  		}
   438  	}
   439  	return hashes, slots, proofs
   440  }
   441  
    442  // emptyRequestAccountRangeFn rejects AccountRangeRequests with an empty response
   443  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   444  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   445  	return nil
   446  }
   447  
   448  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   449  	return nil
   450  }
   451  
   452  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   453  	t.remote.OnTrieNodes(t, requestId, nil)
   454  	return nil
   455  }
   456  
   457  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   458  	return nil
   459  }
   460  
   461  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   462  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   463  	return nil
   464  }
   465  
   466  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   467  	return nil
   468  }
   469  
   470  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   471  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   472  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   473  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   474  		t.term()
   475  	}
   476  	return nil
   477  }
   478  
   479  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   480  //	var bytecodes [][]byte
   481  //	t.remote.OnByteCodes(t, id, bytecodes)
   482  //	return nil
   483  //}
   484  
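        // corruptCodeRequestHandler responds with the requested hashes themselves
        // instead of the bytecodes, which the syncer is expected to reject.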
   485  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   486  	var bytecodes [][]byte
   487  	for _, h := range hashes {
   488  		// Send back the hashes
   489  		bytecodes = append(bytecodes, h[:])
   490  	}
   491  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   492  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   493  		// Mimic the real-life handler, which drops a peer on errors
   494  		t.remote.Unregister(t.id)
   495  	}
   496  	return nil
   497  }
   498  
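        // cappedCodeRequestHandler delivers the bytecode for only the first requested
        // hash, forcing the syncer to re-request the remainder.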
   499  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   500  	var bytecodes [][]byte
   501  	for _, h := range hashes[:1] {
   502  		bytecodes = append(bytecodes, getCodeByHash(h))
   503  	}
   504  	// Missing bytecode can be retrieved again, no error expected
   505  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   506  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   507  		t.term()
   508  	}
   509  	return nil
   510  }
   511  
    512  // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to a very small size
   513  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   514  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   515  }
   516  
   517  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   518  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   519  }
   520  
   521  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   522  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   523  //}
   524  
   525  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   526  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   527  	if len(proofs) > 0 {
   528  		proofs = proofs[1:]
   529  	}
   530  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   531  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   532  		// Mimic the real-life handler, which drops a peer on errors
   533  		t.remote.Unregister(t.id)
   534  	}
   535  	return nil
   536  }
   537  
   538  // corruptStorageRequestHandler doesn't provide good proofs
   539  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   540  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   541  	if len(proofs) > 0 {
   542  		proofs = proofs[1:]
   543  	}
   544  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   545  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   546  		// Mimic the real-life handler, which drops a peer on errors
   547  		t.remote.Unregister(t.id)
   548  	}
   549  	return nil
   550  }
   551  
   552  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   553  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   554  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   555  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   556  		// Mimic the real-life handler, which drops a peer on errors
   557  		t.remote.Unregister(t.id)
   558  	}
   559  	return nil
   560  }
   561  
   562  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   563  // also ship the entire trie inside the proof. If the attack is successful,
   564  // the remote side does not do any follow-up requests
   565  func TestSyncBloatedProof(t *testing.T) {
   566  	t.Parallel()
   567  
   568  	testSyncBloatedProof(t, rawdb.HashScheme)
   569  	testSyncBloatedProof(t, rawdb.PathScheme)
   570  }
   571  
   572  func testSyncBloatedProof(t *testing.T, scheme string) {
   573  	var (
   574  		once   sync.Once
   575  		cancel = make(chan struct{})
   576  		term   = func() {
   577  			once.Do(func() {
   578  				close(cancel)
   579  			})
   580  		}
   581  	)
   582  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   583  	source := newTestPeer("source", t, term)
   584  	source.accountTrie = sourceAccountTrie.Copy()
   585  	source.accountValues = elems
   586  
   587  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   588  		var (
   589  			proofs [][]byte
   590  			keys   []common.Hash
   591  			vals   [][]byte
   592  		)
   593  		// The values
   594  		for _, entry := range t.accountValues {
   595  			if bytes.Compare(entry.k, origin[:]) < 0 {
   596  				continue
   597  			}
   598  			if bytes.Compare(entry.k, limit[:]) > 0 {
   599  				continue
   600  			}
   601  			keys = append(keys, common.BytesToHash(entry.k))
   602  			vals = append(vals, entry.v)
   603  		}
   604  		// The proofs
   605  		proof := trienode.NewProofSet()
   606  		if err := t.accountTrie.Prove(origin[:], proof); err != nil {
    607  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   609  		}
   610  		// The bloat: add proof of every single element
   611  		for _, entry := range t.accountValues {
   612  			if err := t.accountTrie.Prove(entry.k, proof); err != nil {
   613  				t.logger.Error("Could not prove item", "error", err)
   614  			}
   615  		}
   616  		// And remove one item from the elements
   617  		if len(keys) > 2 {
   618  			keys = append(keys[:1], keys[2:]...)
   619  			vals = append(vals[:1], vals[2:]...)
   620  		}
   621  		for _, blob := range proof.List() {
   622  			proofs = append(proofs, blob)
   623  		}
   624  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   625  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   626  			t.term()
   627  			// This is actually correct, signal to exit the test successfully
   628  		}
   629  		return nil
   630  	}
   631  	syncer := setupSyncer(nodeScheme, source)
   632  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   633  		t.Fatal("No error returned from incomplete/cancelled sync")
   634  	}
   635  }
   636  
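        // setupSyncer creates a Syncer backed by a fresh in-memory database and
        // registers the given test peers with it.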
   637  func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
   638  	stateDb := rawdb.NewMemoryDatabase()
   639  	syncer := NewSyncer(stateDb, scheme)
   640  	for _, peer := range peers {
   641  		syncer.Register(peer)
   642  		peer.remote = syncer
   643  	}
   644  	return syncer
   645  }
   646  
   647  // TestSync tests a basic sync with one peer
   648  func TestSync(t *testing.T) {
   649  	t.Parallel()
   650  
   651  	testSync(t, rawdb.HashScheme)
   652  	testSync(t, rawdb.PathScheme)
   653  }
   654  
   655  func testSync(t *testing.T, scheme string) {
   656  	var (
   657  		once   sync.Once
   658  		cancel = make(chan struct{})
   659  		term   = func() {
   660  			once.Do(func() {
   661  				close(cancel)
   662  			})
   663  		}
   664  	)
   665  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   666  
   667  	mkSource := func(name string) *testPeer {
   668  		source := newTestPeer(name, t, term)
   669  		source.accountTrie = sourceAccountTrie.Copy()
   670  		source.accountValues = elems
   671  		return source
   672  	}
   673  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   674  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   675  		t.Fatalf("sync failed: %v", err)
   676  	}
   677  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   678  }
   679  
   680  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   681  // panic within the prover
   682  func TestSyncTinyTriePanic(t *testing.T) {
   683  	t.Parallel()
   684  
   685  	testSyncTinyTriePanic(t, rawdb.HashScheme)
   686  	testSyncTinyTriePanic(t, rawdb.PathScheme)
   687  }
   688  
   689  func testSyncTinyTriePanic(t *testing.T, scheme string) {
   690  	var (
   691  		once   sync.Once
   692  		cancel = make(chan struct{})
   693  		term   = func() {
   694  			once.Do(func() {
   695  				close(cancel)
   696  			})
   697  		}
   698  	)
   699  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme)
   700  
   701  	mkSource := func(name string) *testPeer {
   702  		source := newTestPeer(name, t, term)
   703  		source.accountTrie = sourceAccountTrie.Copy()
   704  		source.accountValues = elems
   705  		return source
   706  	}
   707  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   708  	done := checkStall(t, term)
   709  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   710  		t.Fatalf("sync failed: %v", err)
   711  	}
   712  	close(done)
   713  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   714  }
   715  
   716  // TestMultiSync tests a basic sync with multiple peers
   717  func TestMultiSync(t *testing.T) {
   718  	t.Parallel()
   719  
   720  	testMultiSync(t, rawdb.HashScheme)
   721  	testMultiSync(t, rawdb.PathScheme)
   722  }
   723  
   724  func testMultiSync(t *testing.T, scheme string) {
   725  	var (
   726  		once   sync.Once
   727  		cancel = make(chan struct{})
   728  		term   = func() {
   729  			once.Do(func() {
   730  				close(cancel)
   731  			})
   732  		}
   733  	)
   734  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   735  
   736  	mkSource := func(name string) *testPeer {
   737  		source := newTestPeer(name, t, term)
   738  		source.accountTrie = sourceAccountTrie.Copy()
   739  		source.accountValues = elems
   740  		return source
   741  	}
   742  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
   743  	done := checkStall(t, term)
   744  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   745  		t.Fatalf("sync failed: %v", err)
   746  	}
   747  	close(done)
   748  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   749  }
   750  
    751  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   752  func TestSyncWithStorage(t *testing.T) {
   753  	t.Parallel()
   754  
   755  	testSyncWithStorage(t, rawdb.HashScheme)
   756  	testSyncWithStorage(t, rawdb.PathScheme)
   757  }
   758  
   759  func testSyncWithStorage(t *testing.T, scheme string) {
   760  	var (
   761  		once   sync.Once
   762  		cancel = make(chan struct{})
   763  		term   = func() {
   764  			once.Do(func() {
   765  				close(cancel)
   766  			})
   767  		}
   768  	)
   769  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)
   770  
   771  	mkSource := func(name string) *testPeer {
   772  		source := newTestPeer(name, t, term)
   773  		source.accountTrie = sourceAccountTrie.Copy()
   774  		source.accountValues = elems
   775  		source.setStorageTries(storageTries)
   776  		source.storageValues = storageElems
   777  		return source
   778  	}
   779  	syncer := setupSyncer(scheme, mkSource("sourceA"))
   780  	done := checkStall(t, term)
   781  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   782  		t.Fatalf("sync failed: %v", err)
   783  	}
   784  	close(done)
   785  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   786  }
   787  
    788  // TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
   789  func TestMultiSyncManyUseless(t *testing.T) {
   790  	t.Parallel()
   791  
   792  	testMultiSyncManyUseless(t, rawdb.HashScheme)
   793  	testMultiSyncManyUseless(t, rawdb.PathScheme)
   794  }
   795  
   796  func testMultiSyncManyUseless(t *testing.T, scheme string) {
   797  	var (
   798  		once   sync.Once
   799  		cancel = make(chan struct{})
   800  		term   = func() {
   801  			once.Do(func() {
   802  				close(cancel)
   803  			})
   804  		}
   805  	)
   806  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   807  
   808  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   809  		source := newTestPeer(name, t, term)
   810  		source.accountTrie = sourceAccountTrie.Copy()
   811  		source.accountValues = elems
   812  		source.setStorageTries(storageTries)
   813  		source.storageValues = storageElems
   814  
   815  		if !noAccount {
   816  			source.accountRequestHandler = emptyRequestAccountRangeFn
   817  		}
   818  		if !noStorage {
   819  			source.storageRequestHandler = emptyStorageRequestHandler
   820  		}
   821  		if !noTrieNode {
   822  			source.trieRequestHandler = emptyTrieRequestHandler
   823  		}
   824  		return source
   825  	}
   826  
   827  	syncer := setupSyncer(
   828  		scheme,
   829  		mkSource("full", true, true, true),
   830  		mkSource("noAccounts", false, true, true),
   831  		mkSource("noStorage", true, false, true),
   832  		mkSource("noTrie", true, true, false),
   833  	)
   834  	done := checkStall(t, term)
   835  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   836  		t.Fatalf("sync failed: %v", err)
   837  	}
   838  	close(done)
   839  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   840  }
   841  
    842  // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many that don't return anything valuable at all
   843  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   844  	t.Parallel()
   845  
   846  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme)
   847  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme)
   848  }
   849  
   850  func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
   851  	var (
   852  		once   sync.Once
   853  		cancel = make(chan struct{})
   854  		term   = func() {
   855  			once.Do(func() {
   856  				close(cancel)
   857  			})
   858  		}
   859  	)
   860  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   861  
   862  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   863  		source := newTestPeer(name, t, term)
   864  		source.accountTrie = sourceAccountTrie.Copy()
   865  		source.accountValues = elems
   866  		source.setStorageTries(storageTries)
   867  		source.storageValues = storageElems
   868  
   869  		if !noAccount {
   870  			source.accountRequestHandler = emptyRequestAccountRangeFn
   871  		}
   872  		if !noStorage {
   873  			source.storageRequestHandler = emptyStorageRequestHandler
   874  		}
   875  		if !noTrieNode {
   876  			source.trieRequestHandler = emptyTrieRequestHandler
   877  		}
   878  		return source
   879  	}
   880  
   881  	syncer := setupSyncer(
   882  		scheme,
   883  		mkSource("full", true, true, true),
   884  		mkSource("noAccounts", false, true, true),
   885  		mkSource("noStorage", true, false, true),
   886  		mkSource("noTrie", true, true, false),
   887  	)
    888  	// We set the timeout very low to increase the chance of it being triggered.
    889  	// This previously caused a panic when a response arrived at the same time
    890  	// as the timeout fired.
   891  	syncer.rates.OverrideTTLLimit = time.Millisecond
   892  
   893  	done := checkStall(t, term)
   894  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   895  		t.Fatalf("sync failed: %v", err)
   896  	}
   897  	close(done)
   898  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   899  }
   900  
    901  // TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
   902  func TestMultiSyncManyUnresponsive(t *testing.T) {
   903  	t.Parallel()
   904  
   905  	testMultiSyncManyUnresponsive(t, rawdb.HashScheme)
   906  	testMultiSyncManyUnresponsive(t, rawdb.PathScheme)
   907  }
   908  
   909  func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
   910  	var (
   911  		once   sync.Once
   912  		cancel = make(chan struct{})
   913  		term   = func() {
   914  			once.Do(func() {
   915  				close(cancel)
   916  			})
   917  		}
   918  	)
   919  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   920  
   921  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   922  		source := newTestPeer(name, t, term)
   923  		source.accountTrie = sourceAccountTrie.Copy()
   924  		source.accountValues = elems
   925  		source.setStorageTries(storageTries)
   926  		source.storageValues = storageElems
   927  
   928  		if !noAccount {
   929  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   930  		}
   931  		if !noStorage {
   932  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   933  		}
   934  		if !noTrieNode {
   935  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   936  		}
   937  		return source
   938  	}
   939  
   940  	syncer := setupSyncer(
   941  		scheme,
   942  		mkSource("full", true, true, true),
   943  		mkSource("noAccounts", false, true, true),
   944  		mkSource("noStorage", true, false, true),
   945  		mkSource("noTrie", true, true, false),
   946  	)
    947  	// We set the timeout very low to make the test run a bit faster
   948  	syncer.rates.OverrideTTLLimit = time.Millisecond
   949  
   950  	done := checkStall(t, term)
   951  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   952  		t.Fatalf("sync failed: %v", err)
   953  	}
   954  	close(done)
   955  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   956  }
   957  
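        // checkStall terminates a stalled sync by invoking term after one minute;
        // callers close the returned channel once the sync finishes to disarm the watchdog.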
   958  func checkStall(t *testing.T, term func()) chan struct{} {
   959  	testDone := make(chan struct{})
   960  	go func() {
   961  		select {
   962  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   963  			t.Log("Sync stalled")
   964  			term()
   965  		case <-testDone:
   966  			return
   967  		}
   968  	}()
   969  	return testDone
   970  }
   971  
   972  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   973  // account trie has a few boundary elements.
   974  func TestSyncBoundaryAccountTrie(t *testing.T) {
   975  	t.Parallel()
   976  
   977  	testSyncBoundaryAccountTrie(t, rawdb.HashScheme)
   978  	testSyncBoundaryAccountTrie(t, rawdb.PathScheme)
   979  }
   980  
   981  func testSyncBoundaryAccountTrie(t *testing.T, scheme string) {
   982  	var (
   983  		once   sync.Once
   984  		cancel = make(chan struct{})
   985  		term   = func() {
   986  			once.Do(func() {
   987  				close(cancel)
   988  			})
   989  		}
   990  	)
   991  	nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000)
   992  
   993  	mkSource := func(name string) *testPeer {
   994  		source := newTestPeer(name, t, term)
   995  		source.accountTrie = sourceAccountTrie.Copy()
   996  		source.accountValues = elems
   997  		return source
   998  	}
   999  	syncer := setupSyncer(
  1000  		nodeScheme,
  1001  		mkSource("peer-a"),
  1002  		mkSource("peer-b"),
  1003  	)
  1004  	done := checkStall(t, term)
  1005  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1006  		t.Fatalf("sync failed: %v", err)
  1007  	}
  1008  	close(done)
  1009  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1010  }
  1011  
  1012  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
  1013  // consistently returning very small results
  1014  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
  1015  	t.Parallel()
  1016  
  1017  	testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1018  	testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1019  }
  1020  
  1021  func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1022  	var (
  1023  		once   sync.Once
  1024  		cancel = make(chan struct{})
  1025  		term   = func() {
  1026  			once.Do(func() {
  1027  				close(cancel)
  1028  			})
  1029  		}
  1030  	)
  1031  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1032  
  1033  	mkSource := func(name string, slow bool) *testPeer {
  1034  		source := newTestPeer(name, t, term)
  1035  		source.accountTrie = sourceAccountTrie.Copy()
  1036  		source.accountValues = elems
  1037  
  1038  		if slow {
  1039  			source.accountRequestHandler = starvingAccountRequestHandler
  1040  		}
  1041  		return source
  1042  	}
  1043  
  1044  	syncer := setupSyncer(
  1045  		nodeScheme,
  1046  		mkSource("nice-a", false),
  1047  		mkSource("nice-b", false),
  1048  		mkSource("nice-c", false),
  1049  		mkSource("capped", true),
  1050  	)
  1051  	done := checkStall(t, term)
  1052  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1053  		t.Fatalf("sync failed: %v", err)
  1054  	}
  1055  	close(done)
  1056  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1057  }
  1058  
  1059  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
  1060  // code requests properly.
  1061  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
  1062  	t.Parallel()
  1063  
  1064  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme)
  1065  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme)
  1066  }
  1067  
  1068  func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) {
  1069  	var (
  1070  		once   sync.Once
  1071  		cancel = make(chan struct{})
  1072  		term   = func() {
  1073  			once.Do(func() {
  1074  				close(cancel)
  1075  			})
  1076  		}
  1077  	)
  1078  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1079  
  1080  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1081  		source := newTestPeer(name, t, term)
  1082  		source.accountTrie = sourceAccountTrie.Copy()
  1083  		source.accountValues = elems
  1084  		source.codeRequestHandler = codeFn
  1085  		return source
  1086  	}
  1087  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1088  	// chance that the full set of requested codes is sent only to the
  1089  	// non-corrupt peer, which delivers everything in one go, and makes the
  1090  	// test moot
  1091  	syncer := setupSyncer(
  1092  		nodeScheme,
  1093  		mkSource("capped", cappedCodeRequestHandler),
  1094  		mkSource("corrupt", corruptCodeRequestHandler),
  1095  	)
  1096  	done := checkStall(t, term)
  1097  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1098  		t.Fatalf("sync failed: %v", err)
  1099  	}
  1100  	close(done)
  1101  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1102  }
  1103  
  1104  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1105  	t.Parallel()
  1106  
  1107  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme)
  1108  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme)
  1109  }
  1110  
  1111  func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
  1112  	var (
  1113  		once   sync.Once
  1114  		cancel = make(chan struct{})
  1115  		term   = func() {
  1116  			once.Do(func() {
  1117  				close(cancel)
  1118  			})
  1119  		}
  1120  	)
  1121  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1122  
  1123  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1124  		source := newTestPeer(name, t, term)
  1125  		source.accountTrie = sourceAccountTrie.Copy()
  1126  		source.accountValues = elems
  1127  		source.accountRequestHandler = accFn
  1128  		return source
  1129  	}
  1130  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1131  	// chance that the full set of requested account ranges is sent only to the
  1132  	// non-corrupt peer, which delivers everything in one go, and makes the
  1133  	// test moot
  1134  	syncer := setupSyncer(
  1135  		nodeScheme,
  1136  		mkSource("capped", defaultAccountRequestHandler),
  1137  		mkSource("corrupt", corruptAccountRequestHandler),
  1138  	)
  1139  	done := checkStall(t, term)
  1140  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1141  		t.Fatalf("sync failed: %v", err)
  1142  	}
  1143  	close(done)
  1144  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1145  }
  1146  
   1147  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
  1148  // one by one
  1149  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1150  	t.Parallel()
  1151  
  1152  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme)
  1153  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme)
  1154  }
  1155  
  1156  func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
  1157  	var (
  1158  		once   sync.Once
  1159  		cancel = make(chan struct{})
  1160  		term   = func() {
  1161  			once.Do(func() {
  1162  				close(cancel)
  1163  			})
  1164  		}
  1165  	)
  1166  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1167  
  1168  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1169  		source := newTestPeer(name, t, term)
  1170  		source.accountTrie = sourceAccountTrie.Copy()
  1171  		source.accountValues = elems
  1172  		source.codeRequestHandler = codeFn
  1173  		return source
  1174  	}
  1175  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1176  	// so it shouldn't be more than that
  1177  	var counter int
  1178  	syncer := setupSyncer(
  1179  		nodeScheme,
  1180  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1181  			counter++
  1182  			return cappedCodeRequestHandler(t, id, hashes, max)
  1183  		}),
  1184  	)
  1185  	done := checkStall(t, term)
  1186  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1187  		t.Fatalf("sync failed: %v", err)
  1188  	}
  1189  	close(done)
  1190  
  1191  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1192  	// deduplication is per request batch. If it were a perfect global dedup,
  1193  	// we would expect only 8 requests. If there were no dedup, there would be
  1194  	// 3k requests.
  1195  	// We expect somewhere below 100 requests for these 8 unique hashes. But
  1196  	// the number can be flaky, so don't limit it so strictly.
  1197  	if threshold := 100; counter > threshold {
  1198  		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
  1199  	}
  1200  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1201  }
  1202  
  1203  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1204  // storage trie has a few boundary elements.
  1205  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1206  	t.Parallel()
  1207  
  1208  	testSyncBoundaryStorageTrie(t, rawdb.HashScheme)
  1209  	testSyncBoundaryStorageTrie(t, rawdb.PathScheme)
  1210  }
  1211  
  1212  func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
  1213  	var (
  1214  		once   sync.Once
  1215  		cancel = make(chan struct{})
  1216  		term   = func() {
  1217  			once.Do(func() {
  1218  				close(cancel)
  1219  			})
  1220  		}
  1221  	)
  1222  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false)
  1223  
  1224  	mkSource := func(name string) *testPeer {
  1225  		source := newTestPeer(name, t, term)
  1226  		source.accountTrie = sourceAccountTrie.Copy()
  1227  		source.accountValues = elems
  1228  		source.setStorageTries(storageTries)
  1229  		source.storageValues = storageElems
  1230  		return source
  1231  	}
  1232  	syncer := setupSyncer(
  1233  		scheme,
  1234  		mkSource("peer-a"),
  1235  		mkSource("peer-b"),
  1236  	)
  1237  	done := checkStall(t, term)
  1238  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1239  		t.Fatalf("sync failed: %v", err)
  1240  	}
  1241  	close(done)
  1242  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1243  }
  1244  
  1245  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1246  // consistently returning very small results
  1247  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1248  	t.Parallel()
  1249  
  1250  	testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1251  	testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1252  }
  1253  
  1254  func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1255  	var (
  1256  		once   sync.Once
  1257  		cancel = make(chan struct{})
  1258  		term   = func() {
  1259  			once.Do(func() {
  1260  				close(cancel)
  1261  			})
  1262  		}
  1263  	)
  1264  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false)
  1265  
  1266  	mkSource := func(name string, slow bool) *testPeer {
  1267  		source := newTestPeer(name, t, term)
  1268  		source.accountTrie = sourceAccountTrie.Copy()
  1269  		source.accountValues = elems
  1270  		source.setStorageTries(storageTries)
  1271  		source.storageValues = storageElems
  1272  
  1273  		if slow {
  1274  			source.storageRequestHandler = starvingStorageRequestHandler
  1275  		}
  1276  		return source
  1277  	}
  1278  
  1279  	syncer := setupSyncer(
  1280  		scheme,
  1281  		mkSource("nice-a", false),
  1282  		mkSource("slow", true),
  1283  	)
  1284  	done := checkStall(t, term)
  1285  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1286  		t.Fatalf("sync failed: %v", err)
  1287  	}
  1288  	close(done)
  1289  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1290  }
  1291  
  1292  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1293  // sometimes sending bad proofs
  1294  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1295  	t.Parallel()
  1296  
  1297  	testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme)
  1298  	testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme)
  1299  }
  1300  
  1301  func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
  1302  	var (
  1303  		once   sync.Once
  1304  		cancel = make(chan struct{})
  1305  		term   = func() {
  1306  			once.Do(func() {
  1307  				close(cancel)
  1308  			})
  1309  		}
  1310  	)
  1311  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
  1312  
  1313  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1314  		source := newTestPeer(name, t, term)
  1315  		source.accountTrie = sourceAccountTrie.Copy()
  1316  		source.accountValues = elems
  1317  		source.setStorageTries(storageTries)
  1318  		source.storageValues = storageElems
  1319  		source.storageRequestHandler = handler
  1320  		return source
  1321  	}
  1322  
  1323  	syncer := setupSyncer(
  1324  		scheme,
  1325  		mkSource("nice-a", defaultStorageRequestHandler),
  1326  		mkSource("nice-b", defaultStorageRequestHandler),
  1327  		mkSource("nice-c", defaultStorageRequestHandler),
  1328  		mkSource("corrupt", corruptStorageRequestHandler),
  1329  	)
  1330  	done := checkStall(t, term)
  1331  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1332  		t.Fatalf("sync failed: %v", err)
  1333  	}
  1334  	close(done)
  1335  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1336  }
  1337  
  1338  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1339  	t.Parallel()
  1340  
  1341  	testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme)
  1342  	testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme)
  1343  }
  1344  
  1345  func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
  1346  	var (
  1347  		once   sync.Once
  1348  		cancel = make(chan struct{})
  1349  		term   = func() {
  1350  			once.Do(func() {
  1351  				close(cancel)
  1352  			})
  1353  		}
  1354  	)
  1355  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
  1356  
  1357  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1358  		source := newTestPeer(name, t, term)
  1359  		source.accountTrie = sourceAccountTrie.Copy()
  1360  		source.accountValues = elems
  1361  		source.setStorageTries(storageTries)
  1362  		source.storageValues = storageElems
  1363  		source.storageRequestHandler = handler
  1364  		return source
  1365  	}
  1366  	syncer := setupSyncer(
  1367  		scheme,
  1368  		mkSource("nice-a", defaultStorageRequestHandler),
  1369  		mkSource("nice-b", defaultStorageRequestHandler),
  1370  		mkSource("nice-c", defaultStorageRequestHandler),
  1371  		mkSource("corrupt", noProofStorageRequestHandler),
  1372  	)
  1373  	done := checkStall(t, term)
  1374  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1375  		t.Fatalf("sync failed: %v", err)
  1376  	}
  1377  	close(done)
  1378  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1379  }
  1380  
   1381  // TestSyncWithStorageMisbehavingProve tests a basic sync using accounts + storage + code, against
   1382  // a peer that insists on delivering full storage sets _and_ proofs. This triggered
  1383  // an error, where the recipient erroneously clipped the boundary nodes, but
  1384  // did not mark the account for healing.
  1385  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1386  	t.Parallel()
  1387  
  1388  	testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme)
  1389  	testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme)
  1390  }
  1391  
  1392  func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
  1393  	var (
  1394  		once   sync.Once
  1395  		cancel = make(chan struct{})
  1396  		term   = func() {
  1397  			once.Do(func() {
  1398  				close(cancel)
  1399  			})
  1400  		}
  1401  	)
  1402  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false)
  1403  
  1404  	mkSource := func(name string) *testPeer {
  1405  		source := newTestPeer(name, t, term)
  1406  		source.accountTrie = sourceAccountTrie.Copy()
  1407  		source.accountValues = elems
  1408  		source.setStorageTries(storageTries)
  1409  		source.storageValues = storageElems
  1410  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1411  		return source
  1412  	}
  1413  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
  1414  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1415  		t.Fatalf("sync failed: %v", err)
  1416  	}
  1417  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1418  }
  1419  
   1420  // TestSyncWithUnevenStorage tests sync where the storage tries are unevenly
   1421  // sized and contain a few empty ranges.
  1422  func TestSyncWithUnevenStorage(t *testing.T) {
  1423  	t.Parallel()
  1424  
  1425  	testSyncWithUnevenStorage(t, rawdb.HashScheme)
  1426  	testSyncWithUnevenStorage(t, rawdb.PathScheme)
  1427  }
  1428  
  1429  func testSyncWithUnevenStorage(t *testing.T, scheme string) {
  1430  	var (
  1431  		once   sync.Once
  1432  		cancel = make(chan struct{})
  1433  		term   = func() {
  1434  			once.Do(func() {
  1435  				close(cancel)
  1436  			})
  1437  		}
  1438  	)
  1439  	accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true)
  1440  
  1441  	mkSource := func(name string) *testPeer {
  1442  		source := newTestPeer(name, t, term)
  1443  		source.accountTrie = accountTrie.Copy()
  1444  		source.accountValues = accounts
  1445  		source.setStorageTries(storageTries)
  1446  		source.storageValues = storageElems
  1447  		source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
  1448  			return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode
  1449  		}
  1450  		return source
  1451  	}
  1452  	syncer := setupSyncer(scheme, mkSource("source"))
  1453  	if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil {
  1454  		t.Fatalf("sync failed: %v", err)
  1455  	}
  1456  	verifyTrie(scheme, syncer.db, accountTrie.Hash(), t)
  1457  }
  1458  
  1459  type kv struct {
  1460  	k, v []byte
  1461  }
  1462  
  1463  func (k *kv) cmp(other *kv) int {
  1464  	return bytes.Compare(k.k, other.k)
  1465  }
  1466  
  1467  func key32(i uint64) []byte {
  1468  	key := make([]byte, 32)
  1469  	binary.LittleEndian.PutUint64(key, i)
  1470  	return key
  1471  }
  1472  
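        // codehashes is a small fixed pool of code hashes that the test accounts cycle through.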
  1473  var (
  1474  	codehashes = []common.Hash{
  1475  		crypto.Keccak256Hash([]byte{0}),
  1476  		crypto.Keccak256Hash([]byte{1}),
  1477  		crypto.Keccak256Hash([]byte{2}),
  1478  		crypto.Keccak256Hash([]byte{3}),
  1479  		crypto.Keccak256Hash([]byte{4}),
  1480  		crypto.Keccak256Hash([]byte{5}),
  1481  		crypto.Keccak256Hash([]byte{6}),
  1482  		crypto.Keccak256Hash([]byte{7}),
  1483  	}
  1484  )
  1485  
  1486  // getCodeHash returns a pseudo-random code hash
  1487  func getCodeHash(i uint64) []byte {
  1488  	h := codehashes[int(i)%len(codehashes)]
  1489  	return common.CopyBytes(h[:])
  1490  }
  1491  
  1492  // getCodeByHash is a convenience function to look up the code for a given code hash
  1493  func getCodeByHash(hash common.Hash) []byte {
  1494  	if hash == types.EmptyCodeHash {
  1495  		return nil
  1496  	}
  1497  	for i, h := range codehashes {
  1498  		if h == hash {
  1499  			return []byte{byte(i)}
  1500  		}
  1501  	}
  1502  	return nil
  1503  }
  1504  
  1505  // makeAccountTrieNoStorage spits out an account trie without storage, along with the sorted leaves
  1506  func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) {
  1507  	var (
  1508  		db      = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1509  		accTrie = trie.NewEmpty(db)
  1510  		entries []*kv
  1511  	)
  1512  	for i := uint64(1); i <= uint64(n); i++ {
  1513  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1514  			Nonce:    i,
  1515  			Balance:  uint256.NewInt(i),
  1516  			Root:     types.EmptyRootHash,
  1517  			CodeHash: getCodeHash(i),
  1518  		})
  1519  		key := key32(i)
  1520  		elem := &kv{key, value}
  1521  		accTrie.MustUpdate(elem.k, elem.v)
  1522  		entries = append(entries, elem)
  1523  	}
  1524  	slices.SortFunc(entries, (*kv).cmp)
  1525  
  1526  	// Commit the state changes into db and re-create the trie
  1527  	// for accessing later.
  1528  	root, nodes, _ := accTrie.Commit(false)
  1529  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
  1530  
  1531  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1532  	return db.Scheme(), accTrie, entries
  1533  }
  1534  
  1535  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1536  // accounts normally, this function will fill a few accounts whose keys sit
  1537  // exactly on the range boundary hashes.
  1538  func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
  1539  	var (
  1540  		entries    []*kv
  1541  		boundaries []common.Hash
  1542  
  1543  		db      = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1544  		accTrie = trie.NewEmpty(db)
  1545  	)
  1546  	// Initialize boundaries
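        	// Boundaries are spaced 2^256/accountConcurrency apart, so that one account
        	// lands exactly at the edge of each account-range chunk.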
  1547  	var next common.Hash
  1548  	step := new(big.Int).Sub(
  1549  		new(big.Int).Div(
  1550  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1551  			big.NewInt(int64(accountConcurrency)),
  1552  		), common.Big1,
  1553  	)
  1554  	for i := 0; i < accountConcurrency; i++ {
  1555  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1556  		if i == accountConcurrency-1 {
  1557  			last = common.MaxHash
  1558  		}
  1559  		boundaries = append(boundaries, last)
  1560  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1561  	}
  1562  	// Fill boundary accounts
  1563  	for i := 0; i < len(boundaries); i++ {
  1564  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1565  			Nonce:    uint64(0),
  1566  			Balance:  uint256.NewInt(uint64(i)),
  1567  			Root:     types.EmptyRootHash,
  1568  			CodeHash: getCodeHash(uint64(i)),
  1569  		})
  1570  		elem := &kv{boundaries[i].Bytes(), value}
  1571  		accTrie.MustUpdate(elem.k, elem.v)
  1572  		entries = append(entries, elem)
  1573  	}
  1574  	// Fill other accounts if required
  1575  	for i := uint64(1); i <= uint64(n); i++ {
  1576  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1577  			Nonce:    i,
  1578  			Balance:  uint256.NewInt(i),
  1579  			Root:     types.EmptyRootHash,
  1580  			CodeHash: getCodeHash(i),
  1581  		})
  1582  		elem := &kv{key32(i), value}
  1583  		accTrie.MustUpdate(elem.k, elem.v)
  1584  		entries = append(entries, elem)
  1585  	}
  1586  	slices.SortFunc(entries, (*kv).cmp)
  1587  
  1588  	// Commit the state changes into db and re-create the trie
  1589  	// for accessing later.
  1590  	root, nodes, _ := accTrie.Commit(false)
  1591  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
  1592  
  1593  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1594  	return db.Scheme(), accTrie, entries
  1595  }
  1596  
  1597  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1598  // has a unique storage set.
  1599  func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1600  	var (
  1601  		db             = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1602  		accTrie        = trie.NewEmpty(db)
  1603  		entries        []*kv
  1604  		storageRoots   = make(map[common.Hash]common.Hash)
  1605  		storageTries   = make(map[common.Hash]*trie.Trie)
  1606  		storageEntries = make(map[common.Hash][]*kv)
  1607  		nodes          = trienode.NewMergedNodeSet()
  1608  	)
  1609  	// Create n accounts in the trie
  1610  	for i := uint64(1); i <= uint64(accounts); i++ {
  1611  		key := key32(i)
  1612  		codehash := types.EmptyCodeHash.Bytes()
  1613  		if code {
  1614  			codehash = getCodeHash(i)
  1615  		}
  1616  		// Create a storage trie
  1617  		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
  1618  		nodes.Merge(stNodes)
  1619  
  1620  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1621  			Nonce:    i,
  1622  			Balance:  uint256.NewInt(i),
  1623  			Root:     stRoot,
  1624  			CodeHash: codehash,
  1625  		})
  1626  		elem := &kv{key, value}
  1627  		accTrie.MustUpdate(elem.k, elem.v)
  1628  		entries = append(entries, elem)
  1629  
  1630  		storageRoots[common.BytesToHash(key)] = stRoot
  1631  		storageEntries[common.BytesToHash(key)] = stEntries
  1632  	}
  1633  	slices.SortFunc(entries, (*kv).cmp)
  1634  
  1635  	// Commit account trie
  1636  	root, set, _ := accTrie.Commit(true)
  1637  	nodes.Merge(set)
  1638  
  1639  	// Commit gathered dirty nodes into database
  1640  	db.Update(root, types.EmptyRootHash, 0, nodes, nil)
  1641  
  1642  	// Re-create tries with new root
  1643  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1644  	for i := uint64(1); i <= uint64(accounts); i++ {
  1645  		key := key32(i)
  1646  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1647  		trie, _ := trie.New(id, db)
  1648  		storageTries[common.BytesToHash(key)] = trie
  1649  	}
  1650  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1651  }
  1652  
  1653  // makeAccountTrieWithStorage spits out an account trie with storage tries, along with the leaves
  1654  func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1655  	var (
  1656  		db             = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1657  		accTrie        = trie.NewEmpty(db)
  1658  		entries        []*kv
  1659  		storageRoots   = make(map[common.Hash]common.Hash)
  1660  		storageTries   = make(map[common.Hash]*trie.Trie)
  1661  		storageEntries = make(map[common.Hash][]*kv)
  1662  		nodes          = trienode.NewMergedNodeSet()
  1663  	)
  1664  	// Create n accounts in the trie
  1665  	for i := uint64(1); i <= uint64(accounts); i++ {
  1666  		key := key32(i)
  1667  		codehash := types.EmptyCodeHash.Bytes()
  1668  		if code {
  1669  			codehash = getCodeHash(i)
  1670  		}
  1671  		// Make a storage trie
  1672  		var (
  1673  			stRoot    common.Hash
  1674  			stNodes   *trienode.NodeSet
  1675  			stEntries []*kv
  1676  		)
  1677  		if boundary {
  1678  			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
  1679  		} else if uneven {
  1680  			stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db)
  1681  		} else {
  1682  			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
  1683  		}
  1684  		nodes.Merge(stNodes)
  1685  
  1686  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1687  			Nonce:    i,
  1688  			Balance:  uint256.NewInt(i),
  1689  			Root:     stRoot,
  1690  			CodeHash: codehash,
  1691  		})
  1692  		elem := &kv{key, value}
  1693  		accTrie.MustUpdate(elem.k, elem.v)
  1694  		entries = append(entries, elem)
  1695  
  1696  		// in the default case the same storage contents are reused for every account
  1697  		storageRoots[common.BytesToHash(key)] = stRoot
  1698  		storageEntries[common.BytesToHash(key)] = stEntries
  1699  	}
  1700  	slices.SortFunc(entries, (*kv).cmp)
  1701  
  1702  	// Commit account trie
  1703  	root, set, _ := accTrie.Commit(true)
  1704  	nodes.Merge(set)
  1705  
  1706  	// Commit gathered dirty nodes into database
  1707  	db.Update(root, types.EmptyRootHash, 0, nodes, nil)
  1708  
  1709  	// Re-create tries with new root
  1710  	accTrie, err := trie.New(trie.StateTrieID(root), db)
  1711  	if err != nil {
  1712  		panic(err)
  1713  	}
  1714  	for i := uint64(1); i <= uint64(accounts); i++ {
  1715  		key := key32(i)
  1716  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1717  		trie, err := trie.New(id, db)
  1718  		if err != nil {
  1719  			panic(err)
  1720  		}
  1721  		storageTries[common.BytesToHash(key)] = trie
  1722  	}
  1723  	return accTrie, entries, storageTries, storageEntries
  1724  }
  1725  
  1726  // makeStorageTrieWithSeed fills a storage trie with n items and commits it,
  1727  // returning the root, the dirty trie nodes and the sorted entries. The seed can
  1728  // be used to ensure that tries are unique.
  1729  func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1730  	trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1731  	var entries []*kv
  1732  	for i := uint64(1); i <= n; i++ {
  1733  		// store 'x + seed' at slot 'x'
  1734  		slotValue := key32(i + seed)
  1735  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1736  
  1737  		slotKey := key32(i)
  1738  		key := crypto.Keccak256Hash(slotKey[:])
  1739  
  1740  		elem := &kv{key[:], rlpSlotValue}
  1741  		trie.MustUpdate(elem.k, elem.v)
  1742  		entries = append(entries, elem)
  1743  	}
  1744  	slices.SortFunc(entries, (*kv).cmp)
  1745  	root, nodes, _ := trie.Commit(false)
  1746  	return root, nodes, entries
  1747  }
  1748  
  1749  // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
  1750  // storage slots normally, this function will fill a few slots whose keys sit
  1751  // exactly on the range boundary hashes.
  1752  func makeBoundaryStorageTrie(owner common.Hash, n int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1753  	var (
  1754  		entries    []*kv
  1755  		boundaries []common.Hash
  1756  		trie, _    = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1757  	)
  1758  	// Initialize boundaries
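        	// Boundaries are spaced 2^256/accountConcurrency apart, placing one slot
        	// exactly at the edge of each range chunk.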
  1759  	var next common.Hash
  1760  	step := new(big.Int).Sub(
  1761  		new(big.Int).Div(
  1762  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1763  			big.NewInt(int64(accountConcurrency)),
  1764  		), common.Big1,
  1765  	)
  1766  	for i := 0; i < accountConcurrency; i++ {
  1767  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1768  		if i == accountConcurrency-1 {
  1769  			last = common.MaxHash
  1770  		}
  1771  		boundaries = append(boundaries, last)
  1772  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1773  	}
  1774  	// Fill boundary slots
  1775  	for i := 0; i < len(boundaries); i++ {
  1776  		key := boundaries[i]
  1777  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1778  
  1779  		elem := &kv{key[:], val}
  1780  		trie.MustUpdate(elem.k, elem.v)
  1781  		entries = append(entries, elem)
  1782  	}
  1783  	// Fill other slots if required
  1784  	for i := uint64(1); i <= uint64(n); i++ {
  1785  		slotKey := key32(i)
  1786  		key := crypto.Keccak256Hash(slotKey[:])
  1787  
  1788  		slotValue := key32(i)
  1789  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1790  
  1791  		elem := &kv{key[:], rlpSlotValue}
  1792  		trie.MustUpdate(elem.k, elem.v)
  1793  		entries = append(entries, elem)
  1794  	}
  1795  	slices.SortFunc(entries, (*kv).cmp)
  1796  	root, nodes, _ := trie.Commit(false)
  1797  	return root, nodes, entries
  1798  }
  1799  
  1800  // makeUnevenStorageTrie constructs a storage trie with its states distributed
  1801  // unevenly across different key ranges.
  1802  func makeUnevenStorageTrie(owner common.Hash, slots int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1803  	var (
  1804  		entries []*kv
  1805  		tr, _   = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1806  		chosen  = make(map[byte]struct{})
  1807  	)
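        	// Cluster the slots under three distinct single-byte prefixes drawn from
        	// [0, 14], leaving the 0xf... range (and any unchosen prefixes) empty.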
  1808  	for i := 0; i < 3; i++ {
  1809  		var n int
  1810  		for {
  1811  			n = mrand.Intn(15) // the last range is set empty deliberately
  1812  			if _, ok := chosen[byte(n)]; ok {
  1813  				continue
  1814  			}
  1815  			chosen[byte(n)] = struct{}{}
  1816  			break
  1817  		}
  1818  		for j := 0; j < slots/3; j++ {
  1819  			key := append([]byte{byte(n)}, testrand.Bytes(31)...)
  1820  			val, _ := rlp.EncodeToBytes(testrand.Bytes(32))
  1821  
  1822  			elem := &kv{key, val}
  1823  			tr.MustUpdate(elem.k, elem.v)
  1824  			entries = append(entries, elem)
  1825  		}
  1826  	}
  1827  	slices.SortFunc(entries, (*kv).cmp)
  1828  	root, nodes, _ := tr.Commit(false)
  1829  	return root, nodes, entries
  1830  }
  1831  
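        // verifyTrie iterates the synced account trie and every non-empty storage trie
        // referenced by it, failing if an account cannot be decoded or if iteration hits
        // a missing node, and logs the number of accounts and slots found.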
  1832  func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1833  	t.Helper()
  1834  	triedb := triedb.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
  1835  	accTrie, err := trie.New(trie.StateTrieID(root), triedb)
  1836  	if err != nil {
  1837  		t.Fatal(err)
  1838  	}
  1839  	accounts, slots := 0, 0
  1840  	accIt := trie.NewIterator(accTrie.MustNodeIterator(nil))
  1841  	for accIt.Next() {
  1842  		var acc struct {
  1843  			Nonce    uint64
  1844  			Balance  *big.Int
  1845  			Root     common.Hash
  1846  			CodeHash []byte
  1847  		}
  1848  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1849  			log.Crit("Invalid account encountered during snapshot creation", "err", err)
  1850  		}
  1851  		accounts++
  1852  		if acc.Root != types.EmptyRootHash {
  1853  			id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
  1854  			storeTrie, err := trie.NewStateTrie(id, triedb)
  1855  			if err != nil {
  1856  				t.Fatal(err)
  1857  			}
  1858  			storeIt := trie.NewIterator(storeTrie.MustNodeIterator(nil))
  1859  			for storeIt.Next() {
  1860  				slots++
  1861  			}
  1862  			if err := storeIt.Err; err != nil {
  1863  				t.Fatal(err)
  1864  			}
  1865  		}
  1866  	}
  1867  	if err := accIt.Err; err != nil {
  1868  		t.Fatal(err)
  1869  	}
  1870  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1871  }
  1872  
  1873  // TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
  1874  // state healing
  1875  func TestSyncAccountPerformance(t *testing.T) {
  1876  	// These tests must not run in parallel: they modify the
  1877  	// global var accountConcurrency
  1878  	// t.Parallel()
  1879  	testSyncAccountPerformance(t, rawdb.HashScheme)
  1880  	testSyncAccountPerformance(t, rawdb.PathScheme)
  1881  }
  1882  
  1883  func testSyncAccountPerformance(t *testing.T, scheme string) {
  1884  	// Set the account concurrency to 1. This _should_ result in the
  1885  	// range root becoming correct, and there should be no healing needed
  1886  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1887  	accountConcurrency = 1
  1888  
  1889  	var (
  1890  		once   sync.Once
  1891  		cancel = make(chan struct{})
  1892  		term   = func() {
  1893  			once.Do(func() {
  1894  				close(cancel)
  1895  			})
  1896  		}
  1897  	)
  1898  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
  1899  
  1900  	mkSource := func(name string) *testPeer {
  1901  		source := newTestPeer(name, t, term)
  1902  		source.accountTrie = sourceAccountTrie.Copy()
  1903  		source.accountValues = elems
  1904  		return source
  1905  	}
  1906  	src := mkSource("source")
  1907  	syncer := setupSyncer(nodeScheme, src)
  1908  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1909  		t.Fatalf("sync failed: %v", err)
  1910  	}
  1911  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1912  	// The trie root will always be requested, since it is added when the snap
  1913  	// sync cycle starts. When popping the queue, we do not look it up again.
  1914  	// Doing so would bring this number down to zero in this artificial testcase,
  1915  	// but only add extra IO for no reason in practice.
  1916  	if have, want := src.nTrienodeRequests, 1; have != want {
  1917  		fmt.Print(src.Stats())
  1918  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1919  	}
  1920  }
  1921  
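        // TestSlotEstimation checks estimateRemainingSlots: given the number of slots
        // already delivered up to the 'last' key, the estimate roughly extrapolates that
        // density over the keyspace still remaining (a zero or too-small 'last' yields
        // an error and a zero estimate).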
  1922  func TestSlotEstimation(t *testing.T) {
  1923  	for i, tc := range []struct {
  1924  		last  common.Hash
  1925  		count int
  1926  		want  uint64
  1927  	}{
  1928  		{
  1929  			// Half the space
  1930  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1931  			100,
  1932  			100,
  1933  		},
  1934  		{
  1935  			// 1 / 16th
  1936  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1937  			100,
  1938  			1500,
  1939  		},
  1940  		{
  1941  			// Bit more than 1 / 16th
  1942  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1943  			100,
  1944  			1499,
  1945  		},
  1946  		{
  1947  			// Almost everything
  1948  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1949  			100,
  1950  			6,
  1951  		},
  1952  		{
  1953  			// Almost nothing -- should lead to error
  1954  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1955  			1,
  1956  			0,
  1957  		},
  1958  		{
  1959  			// Nothing -- should lead to error
  1960  			common.Hash{},
  1961  			100,
  1962  			0,
  1963  		},
  1964  	} {
  1965  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1966  		if want := tc.want; have != want {
  1967  			t.Errorf("test %d: have %d want %d", i, have, want)
  1968  		}
  1969  	}
  1970  }
  1971  
  1972  func newDbConfig(scheme string) *triedb.Config {
  1973  	if scheme == rawdb.HashScheme {
  1974  		return &triedb.Config{}
  1975  	}
  1976  	return &triedb.Config{PathDB: pathdb.Defaults}
  1977  }