github.com/ethereum/go-ethereum@v1.16.1/eth/protocols/snap/sync_test.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"slices"
    27  	"sync"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/core/rawdb"
    33  	"github.com/ethereum/go-ethereum/core/types"
    34  	"github.com/ethereum/go-ethereum/crypto"
    35  	"github.com/ethereum/go-ethereum/ethdb"
    36  	"github.com/ethereum/go-ethereum/internal/testrand"
    37  	"github.com/ethereum/go-ethereum/log"
    38  	"github.com/ethereum/go-ethereum/rlp"
    39  	"github.com/ethereum/go-ethereum/trie"
    40  	"github.com/ethereum/go-ethereum/trie/trienode"
    41  	"github.com/ethereum/go-ethereum/triedb"
    42  	"github.com/ethereum/go-ethereum/triedb/pathdb"
    43  	"github.com/holiman/uint256"
    44  	"golang.org/x/crypto/sha3"
    45  )
    46  
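        // TestHashing cross-checks that hashing bytecodes via crypto.NewKeccakState,
        // reading the digest into a reusable buffer, yields the same output as the
        // legacy sha3.NewLegacyKeccak256 + Sum approach.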
    47  func TestHashing(t *testing.T) {
    48  	t.Parallel()
    49  
    50  	var bytecodes = make([][]byte, 10)
    51  	for i := 0; i < len(bytecodes); i++ {
    52  		buf := make([]byte, 100)
    53  		rand.Read(buf)
    54  		bytecodes[i] = buf
    55  	}
    56  	var want, got string
    57  	var old = func() {
    58  		hasher := sha3.NewLegacyKeccak256()
    59  		for i := 0; i < len(bytecodes); i++ {
    60  			hasher.Reset()
    61  			hasher.Write(bytecodes[i])
    62  			hash := hasher.Sum(nil)
    63  			got = fmt.Sprintf("%v\n%v", got, hash)
    64  		}
    65  	}
    66  	var new = func() {
    67  		hasher := crypto.NewKeccakState()
    68  		var hash = make([]byte, 32)
    69  		for i := 0; i < len(bytecodes); i++ {
    70  			hasher.Reset()
    71  			hasher.Write(bytecodes[i])
    72  			hasher.Read(hash)
    73  			want = fmt.Sprintf("%v\n%v", want, hash)
    74  		}
    75  	}
    76  	old()
    77  	new()
    78  	if want != got {
    79  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    80  	}
    81  }
    82  
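        // BenchmarkHashing compares the legacy Sum-based hashing, which allocates a
        // fresh digest slice on every call, against crypto.NewKeccakState's Read,
        // which squeezes the digest into a caller-provided buffer.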
    83  func BenchmarkHashing(b *testing.B) {
    84  	var bytecodes = make([][]byte, 10000)
    85  	for i := 0; i < len(bytecodes); i++ {
    86  		buf := make([]byte, 100)
    87  		rand.Read(buf)
    88  		bytecodes[i] = buf
    89  	}
    90  	var old = func() {
    91  		hasher := sha3.NewLegacyKeccak256()
    92  		for i := 0; i < len(bytecodes); i++ {
    93  			hasher.Reset()
    94  			hasher.Write(bytecodes[i])
    95  			hasher.Sum(nil)
    96  		}
    97  	}
    98  	var new = func() {
    99  		hasher := crypto.NewKeccakState()
   100  		var hash = make([]byte, 32)
   101  		for i := 0; i < len(bytecodes); i++ {
   102  			hasher.Reset()
   103  			hasher.Write(bytecodes[i])
   104  			hasher.Read(hash)
   105  		}
   106  	}
   107  	b.Run("old", func(b *testing.B) {
   108  		b.ReportAllocs()
   109  		for i := 0; i < b.N; i++ {
   110  			old()
   111  		}
   112  	})
   113  	b.Run("new", func(b *testing.B) {
   114  		b.ReportAllocs()
   115  		for i := 0; i < b.N; i++ {
   116  			new()
   117  		}
   118  	})
   119  }
   120  
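        // The handler types below mirror the four snap request/response pairs
        // (account ranges, storage ranges, trie-node healing and bytecodes). Tests
        // override them per peer to emulate slow, empty or corrupt remotes.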
   121  type (
   122  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   123  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   124  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   125  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   126  )
   127  
   128  type testPeer struct {
   129  	id            string
   130  	test          *testing.T
   131  	remote        *Syncer
   132  	logger        log.Logger
   133  	accountTrie   *trie.Trie
   134  	accountValues []*kv
   135  	storageTries  map[common.Hash]*trie.Trie
   136  	storageValues map[common.Hash][]*kv
   137  
   138  	accountRequestHandler accountHandlerFunc
   139  	storageRequestHandler storageHandlerFunc
   140  	trieRequestHandler    trieHandlerFunc
   141  	codeRequestHandler    codeHandlerFunc
   142  	term                  func()
   143  
   144  	// counters
   145  	nAccountRequests  int
   146  	nStorageRequests  int
   147  	nBytecodeRequests int
   148  	nTrienodeRequests int
   149  }
   150  
   151  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   152  	peer := &testPeer{
   153  		id:                    id,
   154  		test:                  t,
   155  		logger:                log.New("id", id),
   156  		accountRequestHandler: defaultAccountRequestHandler,
   157  		trieRequestHandler:    defaultTrieRequestHandler,
   158  		storageRequestHandler: defaultStorageRequestHandler,
   159  		codeRequestHandler:    defaultCodeRequestHandler,
   160  		term:                  term,
   161  	}
   162  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   163  	//peer.logger.SetHandler(stderrHandler)
   164  	return peer
   165  }
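
        // A typical test constructs a peer with newTestPeer and then swaps one of the
        // handler fields to emulate a misbehaving remote. A minimal illustrative
        // sketch (the name exampleLaggyPeer and the 10ms delay are assumptions made
        // for this example, not part of the upstream suite):
        func exampleLaggyPeer(t *testing.T, term func()) *testPeer {
        	peer := newTestPeer("laggy", t, term)
        	peer.storageRequestHandler = func(tp *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
        		time.Sleep(10 * time.Millisecond) // emulate a high-latency remote, then answer normally
        		return defaultStorageRequestHandler(tp, reqId, root, accounts, origin, limit, max)
        	}
        	return peer
        }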
   166  
   167  func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
   168  	t.storageTries = make(map[common.Hash]*trie.Trie)
   169  	for root, trie := range tries {
   170  		t.storageTries[root] = trie.Copy()
   171  	}
   172  }
   173  
   174  func (t *testPeer) ID() string      { return t.id }
   175  func (t *testPeer) Log() log.Logger { return t.logger }
   176  
   177  func (t *testPeer) Stats() string {
   178  	return fmt.Sprintf(`Account requests: %d
   179  Storage requests: %d
   180  Bytecode requests: %d
   181  Trienode requests: %d
   182  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   183  }
   184  
   185  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   186  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   187  	t.nAccountRequests++
   188  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   189  	return nil
   190  }
   191  
   192  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   193  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   194  	t.nTrienodeRequests++
   195  	go t.trieRequestHandler(t, id, root, paths, bytes)
   196  	return nil
   197  }
   198  
   199  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   200  	t.nStorageRequests++
   201  	if len(accounts) == 1 && origin != nil {
   202  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   203  	} else {
   204  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   205  	}
   206  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   207  	return nil
   208  }
   209  
   210  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   211  	t.nBytecodeRequests++
   212  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   213  	go t.codeRequestHandler(t, id, hashes, bytes)
   214  	return nil
   215  }
   216  
   217  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   218  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   219  	// Pass the response
   220  	var nodes [][]byte
   221  	for _, pathset := range paths {
   222  		switch len(pathset) {
   223  		case 1:
   224  			blob, _, err := t.accountTrie.GetNode(pathset[0])
   225  			if err != nil {
   226  				t.logger.Info("Error handling req", "error", err)
   227  				break
   228  			}
   229  			nodes = append(nodes, blob)
   230  		default:
   231  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   232  			for _, path := range pathset[1:] {
   233  				blob, _, err := account.GetNode(path)
   234  				if err != nil {
   235  					t.logger.Info("Error handling req", "error", err)
   236  					break
   237  				}
   238  				nodes = append(nodes, blob)
   239  			}
   240  		}
   241  	}
   242  	t.remote.OnTrieNodes(t, requestId, nodes)
   243  	return nil
   244  }
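
        // A trie healing handler may answer with only a subset of the requested
        // nodes; the syncer re-requests whatever is still missing. An illustrative
        // variant that caps every response at a single path set (the name is an
        // assumption, not part of the upstream suite):
        func exampleSingleNodeTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
        	if len(paths) > 1 {
        		paths = paths[:1] // serve just the first path set and let the rest be retried
        	}
        	return defaultTrieRequestHandler(t, requestId, root, paths, cap)
        }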
   245  
   246  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   247  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   248  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   249  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   250  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   251  		t.term()
   252  		return err
   253  	}
   254  	return nil
   255  }
   256  
   257  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   258  	var size uint64
   259  	if limit == (common.Hash{}) {
   260  		limit = common.MaxHash
   261  	}
   262  	for _, entry := range t.accountValues {
   263  		if size > cap {
   264  			break
   265  		}
   266  		if bytes.Compare(origin[:], entry.k) <= 0 {
   267  			keys = append(keys, common.BytesToHash(entry.k))
   268  			vals = append(vals, entry.v)
   269  			size += uint64(32 + len(entry.v))
   270  		}
   271  		// If we've exceeded the request threshold, abort
   272  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   273  			break
   274  		}
   275  	}
   276  	// Unless we send the entire trie, we need to supply proofs
   277  	// Actually, we need to supply proofs either way! This seems to be an implementation
   278  	// quirk in go-ethereum
   279  	proof := trienode.NewProofSet()
   280  	if err := t.accountTrie.Prove(origin[:], proof); err != nil {
   281  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   282  	}
   283  	if len(keys) > 0 {
   284  		lastK := (keys[len(keys)-1])[:]
   285  		if err := t.accountTrie.Prove(lastK, proof); err != nil {
   286  			t.logger.Error("Could not prove last item", "error", err)
   287  		}
   288  	}
   289  	return keys, vals, proof.List()
   290  }
   291  
   292  // defaultStorageRequestHandler is a well-behaving storage request handler
   293  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   294  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   295  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   296  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   297  		t.term()
   298  	}
   299  	return nil
   300  }
   301  
   302  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   303  	var bytecodes [][]byte
   304  	for _, h := range hashes {
   305  		bytecodes = append(bytecodes, getCodeByHash(h))
   306  	}
   307  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   308  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   309  		t.term()
   310  	}
   311  	return nil
   312  }
   313  
   314  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   315  	var size uint64
   316  	for _, account := range accounts {
   317  		// The first account might start from a different origin and end sooner
   318  		var originHash common.Hash
   319  		if len(origin) > 0 {
   320  			originHash = common.BytesToHash(origin)
   321  		}
   322  		var limitHash = common.MaxHash
   323  		if len(limit) > 0 {
   324  			limitHash = common.BytesToHash(limit)
   325  		}
   326  		var (
   327  			keys  []common.Hash
   328  			vals  [][]byte
   329  			abort bool
   330  		)
   331  		for _, entry := range t.storageValues[account] {
   332  			if size >= max {
   333  				abort = true
   334  				break
   335  			}
   336  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   337  				continue
   338  			}
   339  			keys = append(keys, common.BytesToHash(entry.k))
   340  			vals = append(vals, entry.v)
   341  			size += uint64(32 + len(entry.v))
   342  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   343  				break
   344  			}
   345  		}
   346  		if len(keys) > 0 {
   347  			hashes = append(hashes, keys)
   348  			slots = append(slots, vals)
   349  		}
   350  		// Generate the Merkle proofs for the first and last storage slot, but
   351  		// only if the response was capped. If the entire storage trie is included
   352  		// in the response, there is no need for any proofs.
   353  		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
   354  			// If we're aborting, we need to prove the first and last item
   355  			// This terminates the response (and thus the loop)
   356  			proof := trienode.NewProofSet()
   357  			stTrie := t.storageTries[account]
   358  
   359  			// Here's a potential gotcha: when constructing the proof, we cannot
   360  			// use the 'origin' slice directly, but must use the full 32-byte
   361  			// hash form.
   362  			if err := stTrie.Prove(originHash[:], proof); err != nil {
   363  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   364  			}
   365  			if len(keys) > 0 {
   366  				lastK := (keys[len(keys)-1])[:]
   367  				if err := stTrie.Prove(lastK, proof); err != nil {
   368  					t.logger.Error("Could not prove last item", "error", err)
   369  				}
   370  			}
   371  			proofs = append(proofs, proof.List()...)
   372  			break
   373  		}
   374  	}
   375  	return hashes, slots, proofs
   376  }
   377  
   378  // createStorageRequestResponseAlwaysProve tests a corner case, where the peer always
   379  // supplies the proof for the last account, even if it is 'complete'.
   380  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   381  	var size uint64
   382  	max = max * 3 / 4
   383  
   384  	var origin common.Hash
   385  	if len(bOrigin) > 0 {
   386  		origin = common.BytesToHash(bOrigin)
   387  	}
   388  	var exit bool
   389  	for i, account := range accounts {
   390  		var keys []common.Hash
   391  		var vals [][]byte
   392  		for _, entry := range t.storageValues[account] {
   393  			if bytes.Compare(entry.k, origin[:]) < 0 {
   394  				exit = true
   395  			}
   396  			keys = append(keys, common.BytesToHash(entry.k))
   397  			vals = append(vals, entry.v)
   398  			size += uint64(32 + len(entry.v))
   399  			if size > max {
   400  				exit = true
   401  			}
   402  		}
   403  		if i == len(accounts)-1 {
   404  			exit = true
   405  		}
   406  		hashes = append(hashes, keys)
   407  		slots = append(slots, vals)
   408  
   409  		if exit {
   410  			// If we're aborting, we need to prove the first and last item
   411  			// This terminates the response (and thus the loop)
   412  			proof := trienode.NewProofSet()
   413  			stTrie := t.storageTries[account]
   414  
   415  			// Here's a potential gotcha: when constructing the proof, we cannot
   416  			// use the 'origin' slice directly, but must use the full 32-byte
   417  			// hash form.
   418  			if err := stTrie.Prove(origin[:], proof); err != nil {
   419  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   420  					"error", err)
   421  			}
   422  			if len(keys) > 0 {
   423  				lastK := (keys[len(keys)-1])[:]
   424  				if err := stTrie.Prove(lastK, proof); err != nil {
   425  					t.logger.Error("Could not prove last item", "error", err)
   426  				}
   427  			}
   428  			proofs = append(proofs, proof.List()...)
   429  			break
   430  		}
   431  	}
   432  	return hashes, slots, proofs
   433  }
   434  
   435  // emptyRequestAccountRangeFn rejects AccountRangeRequests with an empty response
   436  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   437  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   438  	return nil
   439  }
   440  
   441  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   442  	return nil
   443  }
   444  
   445  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   446  	t.remote.OnTrieNodes(t, requestId, nil)
   447  	return nil
   448  }
   449  
   450  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   451  	return nil
   452  }
   453  
   454  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   455  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   456  	return nil
   457  }
   458  
   459  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   460  	return nil
   461  }
   462  
   463  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   464  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   465  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   466  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   467  		t.term()
   468  	}
   469  	return nil
   470  }
   471  
   472  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   473  //	var bytecodes [][]byte
   474  //	t.remote.OnByteCodes(t, id, bytecodes)
   475  //	return nil
   476  //}
   477  
   478  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   479  	var bytecodes [][]byte
   480  	for _, h := range hashes {
   481  		// Send back the hashes
   482  		bytecodes = append(bytecodes, h[:])
   483  	}
   484  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   485  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   486  		// Mimic the real-life handler, which drops a peer on errors
   487  		t.remote.Unregister(t.id)
   488  	}
   489  	return nil
   490  }
   491  
   492  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   493  	var bytecodes [][]byte
   494  	for _, h := range hashes[:1] {
   495  		bytecodes = append(bytecodes, getCodeByHash(h))
   496  	}
   497  	// Missing bytecode can be retrieved again, no error expected
   498  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   499  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   500  		t.term()
   501  	}
   502  	return nil
   503  }
   504  
   505  // starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to a very small size
   506  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   507  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   508  }
   509  
   510  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   511  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   512  }
   513  
   514  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   515  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   516  //}
   517  
   518  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   519  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   520  	if len(proofs) > 0 {
   521  		proofs = proofs[1:]
   522  	}
   523  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   524  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   525  		// Mimic the real-life handler, which drops a peer on errors
   526  		t.remote.Unregister(t.id)
   527  	}
   528  	return nil
   529  }
   530  
   531  // corruptStorageRequestHandler doesn't provide good proofs
   532  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   533  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   534  	if len(proofs) > 0 {
   535  		proofs = proofs[1:]
   536  	}
   537  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   538  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   539  		// Mimic the real-life handler, which drops a peer on errors
   540  		t.remote.Unregister(t.id)
   541  	}
   542  	return nil
   543  }
   544  
   545  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   546  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   547  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   548  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   549  		// Mimic the real-life handler, which drops a peer on errors
   550  		t.remote.Unregister(t.id)
   551  	}
   552  	return nil
   553  }
   554  
   555  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   556  // also ship the entire trie inside the proof. If the attack is successful,
   557  // the remote side does not do any follow-up requests
   558  func TestSyncBloatedProof(t *testing.T) {
   559  	t.Parallel()
   560  
   561  	testSyncBloatedProof(t, rawdb.HashScheme)
   562  	testSyncBloatedProof(t, rawdb.PathScheme)
   563  }
   564  
   565  func testSyncBloatedProof(t *testing.T, scheme string) {
   566  	var (
   567  		once   sync.Once
   568  		cancel = make(chan struct{})
   569  		term   = func() {
   570  			once.Do(func() {
   571  				close(cancel)
   572  			})
   573  		}
   574  	)
   575  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   576  	source := newTestPeer("source", t, term)
   577  	source.accountTrie = sourceAccountTrie.Copy()
   578  	source.accountValues = elems
   579  
   580  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   581  		var (
   582  			keys []common.Hash
   583  			vals [][]byte
   584  		)
   585  		// The values
   586  		for _, entry := range t.accountValues {
   587  			if bytes.Compare(entry.k, origin[:]) < 0 {
   588  				continue
   589  			}
   590  			if bytes.Compare(entry.k, limit[:]) > 0 {
   591  				continue
   592  			}
   593  			keys = append(keys, common.BytesToHash(entry.k))
   594  			vals = append(vals, entry.v)
   595  		}
   596  		// The proofs
   597  		proof := trienode.NewProofSet()
   598  		if err := t.accountTrie.Prove(origin[:], proof); err != nil {
   599  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   601  		}
   602  		// The bloat: add proof of every single element
   603  		for _, entry := range t.accountValues {
   604  			if err := t.accountTrie.Prove(entry.k, proof); err != nil {
   605  				t.logger.Error("Could not prove item", "error", err)
   606  			}
   607  		}
   608  		// And remove one item from the elements
   609  		if len(keys) > 2 {
   610  			keys = append(keys[:1], keys[2:]...)
   611  			vals = append(vals[:1], vals[2:]...)
   612  		}
   613  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proof.List()); err != nil {
   614  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   615  			t.term()
   616  			// This is actually correct, signal to exit the test successfully
   617  		}
   618  		return nil
   619  	}
   620  	syncer := setupSyncer(nodeScheme, source)
   621  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   622  		t.Fatal("No error returned from incomplete/cancelled sync")
   623  	}
   624  }
   625  
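        // setupSyncer creates a Syncer backed by a fresh in-memory database and
        // registers the given test peers with it.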
   626  func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
   627  	stateDb := rawdb.NewMemoryDatabase()
   628  	syncer := NewSyncer(stateDb, scheme)
   629  	for _, peer := range peers {
   630  		syncer.Register(peer)
   631  		peer.remote = syncer
   632  	}
   633  	return syncer
   634  }
   635  
   636  // TestSync tests a basic sync with one peer
   637  func TestSync(t *testing.T) {
   638  	t.Parallel()
   639  
   640  	testSync(t, rawdb.HashScheme)
   641  	testSync(t, rawdb.PathScheme)
   642  }
   643  
   644  func testSync(t *testing.T, scheme string) {
   645  	var (
   646  		once   sync.Once
   647  		cancel = make(chan struct{})
   648  		term   = func() {
   649  			once.Do(func() {
   650  				close(cancel)
   651  			})
   652  		}
   653  	)
   654  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   655  
   656  	mkSource := func(name string) *testPeer {
   657  		source := newTestPeer(name, t, term)
   658  		source.accountTrie = sourceAccountTrie.Copy()
   659  		source.accountValues = elems
   660  		return source
   661  	}
   662  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   663  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   664  		t.Fatalf("sync failed: %v", err)
   665  	}
   666  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   667  }
   668  
   669  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   670  // panic within the prover
   671  func TestSyncTinyTriePanic(t *testing.T) {
   672  	t.Parallel()
   673  
   674  	testSyncTinyTriePanic(t, rawdb.HashScheme)
   675  	testSyncTinyTriePanic(t, rawdb.PathScheme)
   676  }
   677  
   678  func testSyncTinyTriePanic(t *testing.T, scheme string) {
   679  	var (
   680  		once   sync.Once
   681  		cancel = make(chan struct{})
   682  		term   = func() {
   683  			once.Do(func() {
   684  				close(cancel)
   685  			})
   686  		}
   687  	)
   688  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme)
   689  
   690  	mkSource := func(name string) *testPeer {
   691  		source := newTestPeer(name, t, term)
   692  		source.accountTrie = sourceAccountTrie.Copy()
   693  		source.accountValues = elems
   694  		return source
   695  	}
   696  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   697  	done := checkStall(t, term)
   698  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   699  		t.Fatalf("sync failed: %v", err)
   700  	}
   701  	close(done)
   702  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   703  }
   704  
   705  // TestMultiSync tests a basic sync with multiple peers
   706  func TestMultiSync(t *testing.T) {
   707  	t.Parallel()
   708  
   709  	testMultiSync(t, rawdb.HashScheme)
   710  	testMultiSync(t, rawdb.PathScheme)
   711  }
   712  
   713  func testMultiSync(t *testing.T, scheme string) {
   714  	var (
   715  		once   sync.Once
   716  		cancel = make(chan struct{})
   717  		term   = func() {
   718  			once.Do(func() {
   719  				close(cancel)
   720  			})
   721  		}
   722  	)
   723  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   724  
   725  	mkSource := func(name string) *testPeer {
   726  		source := newTestPeer(name, t, term)
   727  		source.accountTrie = sourceAccountTrie.Copy()
   728  		source.accountValues = elems
   729  		return source
   730  	}
   731  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
   732  	done := checkStall(t, term)
   733  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   734  		t.Fatalf("sync failed: %v", err)
   735  	}
   736  	close(done)
   737  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   738  }
   739  
   740  // TestSyncWithStorage tests basic sync using accounts + storage + code
   741  func TestSyncWithStorage(t *testing.T) {
   742  	t.Parallel()
   743  
   744  	testSyncWithStorage(t, rawdb.HashScheme)
   745  	testSyncWithStorage(t, rawdb.PathScheme)
   746  }
   747  
   748  func testSyncWithStorage(t *testing.T, scheme string) {
   749  	var (
   750  		once   sync.Once
   751  		cancel = make(chan struct{})
   752  		term   = func() {
   753  			once.Do(func() {
   754  				close(cancel)
   755  			})
   756  		}
   757  	)
   758  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)
   759  
   760  	mkSource := func(name string) *testPeer {
   761  		source := newTestPeer(name, t, term)
   762  		source.accountTrie = sourceAccountTrie.Copy()
   763  		source.accountValues = elems
   764  		source.setStorageTries(storageTries)
   765  		source.storageValues = storageElems
   766  		return source
   767  	}
   768  	syncer := setupSyncer(scheme, mkSource("sourceA"))
   769  	done := checkStall(t, term)
   770  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   771  		t.Fatalf("sync failed: %v", err)
   772  	}
   773  	close(done)
   774  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   775  }
   776  
   777  // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
   778  func TestMultiSyncManyUseless(t *testing.T) {
   779  	t.Parallel()
   780  
   781  	testMultiSyncManyUseless(t, rawdb.HashScheme)
   782  	testMultiSyncManyUseless(t, rawdb.PathScheme)
   783  }
   784  
   785  func testMultiSyncManyUseless(t *testing.T, scheme string) {
   786  	var (
   787  		once   sync.Once
   788  		cancel = make(chan struct{})
   789  		term   = func() {
   790  			once.Do(func() {
   791  				close(cancel)
   792  			})
   793  		}
   794  	)
   795  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   796  
   797  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   798  		source := newTestPeer(name, t, term)
   799  		source.accountTrie = sourceAccountTrie.Copy()
   800  		source.accountValues = elems
   801  		source.setStorageTries(storageTries)
   802  		source.storageValues = storageElems
   803  
   804  		if !noAccount {
   805  			source.accountRequestHandler = emptyRequestAccountRangeFn
   806  		}
   807  		if !noStorage {
   808  			source.storageRequestHandler = emptyStorageRequestHandler
   809  		}
   810  		if !noTrieNode {
   811  			source.trieRequestHandler = emptyTrieRequestHandler
   812  		}
   813  		return source
   814  	}
   815  
   816  	syncer := setupSyncer(
   817  		scheme,
   818  		mkSource("full", true, true, true),
   819  		mkSource("noAccounts", false, true, true),
   820  		mkSource("noStorage", true, false, true),
   821  		mkSource("noTrie", true, true, false),
   822  	)
   823  	done := checkStall(t, term)
   824  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   825  		t.Fatalf("sync failed: %v", err)
   826  	}
   827  	close(done)
   828  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   829  }
   830  
   831  // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
   832  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   833  	t.Parallel()
   834  
   835  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme)
   836  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme)
   837  }
   838  
   839  func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
   840  	var (
   841  		once   sync.Once
   842  		cancel = make(chan struct{})
   843  		term   = func() {
   844  			once.Do(func() {
   845  				close(cancel)
   846  			})
   847  		}
   848  	)
   849  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   850  
   851  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   852  		source := newTestPeer(name, t, term)
   853  		source.accountTrie = sourceAccountTrie.Copy()
   854  		source.accountValues = elems
   855  		source.setStorageTries(storageTries)
   856  		source.storageValues = storageElems
   857  
   858  		if !noAccount {
   859  			source.accountRequestHandler = emptyRequestAccountRangeFn
   860  		}
   861  		if !noStorage {
   862  			source.storageRequestHandler = emptyStorageRequestHandler
   863  		}
   864  		if !noTrieNode {
   865  			source.trieRequestHandler = emptyTrieRequestHandler
   866  		}
   867  		return source
   868  	}
   869  
   870  	syncer := setupSyncer(
   871  		scheme,
   872  		mkSource("full", true, true, true),
   873  		mkSource("noAccounts", false, true, true),
   874  		mkSource("noStorage", true, false, true),
   875  		mkSource("noTrie", true, true, false),
   876  	)
   877  	// We're setting the timeout very low to increase the chance of the timeout
   878  	// being triggered. This was previously a cause of panic, when a response
   879  	// arrived at the same time as a timeout was triggered.
   880  	syncer.rates.OverrideTTLLimit = time.Millisecond
   881  
   882  	done := checkStall(t, term)
   883  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   884  		t.Fatalf("sync failed: %v", err)
   885  	}
   886  	close(done)
   887  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   888  }
   889  
   890  // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
   891  func TestMultiSyncManyUnresponsive(t *testing.T) {
   892  	t.Parallel()
   893  
   894  	testMultiSyncManyUnresponsive(t, rawdb.HashScheme)
   895  	testMultiSyncManyUnresponsive(t, rawdb.PathScheme)
   896  }
   897  
   898  func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
   899  	var (
   900  		once   sync.Once
   901  		cancel = make(chan struct{})
   902  		term   = func() {
   903  			once.Do(func() {
   904  				close(cancel)
   905  			})
   906  		}
   907  	)
   908  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   909  
   910  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   911  		source := newTestPeer(name, t, term)
   912  		source.accountTrie = sourceAccountTrie.Copy()
   913  		source.accountValues = elems
   914  		source.setStorageTries(storageTries)
   915  		source.storageValues = storageElems
   916  
   917  		if !noAccount {
   918  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   919  		}
   920  		if !noStorage {
   921  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   922  		}
   923  		if !noTrieNode {
   924  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   925  		}
   926  		return source
   927  	}
   928  
   929  	syncer := setupSyncer(
   930  		scheme,
   931  		mkSource("full", true, true, true),
   932  		mkSource("noAccounts", false, true, true),
   933  		mkSource("noStorage", true, false, true),
   934  		mkSource("noTrie", true, true, false),
   935  	)
   936  	// We're setting the timeout very low to make the test run a bit faster
   937  	syncer.rates.OverrideTTLLimit = time.Millisecond
   938  
   939  	done := checkStall(t, term)
   940  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   941  		t.Fatalf("sync failed: %v", err)
   942  	}
   943  	close(done)
   944  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   945  }
   946  
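        // checkStall arms a watchdog that cancels the sync via term() if it is still
        // running after a minute; the caller closes the returned channel to disarm
        // the watchdog once the sync has finished.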
   947  func checkStall(t *testing.T, term func()) chan struct{} {
   948  	testDone := make(chan struct{})
   949  	go func() {
   950  		select {
   951  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   952  			t.Log("Sync stalled")
   953  			term()
   954  		case <-testDone:
   955  			return
   956  		}
   957  	}()
   958  	return testDone
   959  }
   960  
   961  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   962  // account trie has a few boundary elements.
   963  func TestSyncBoundaryAccountTrie(t *testing.T) {
   964  	t.Parallel()
   965  
   966  	testSyncBoundaryAccountTrie(t, rawdb.HashScheme)
   967  	testSyncBoundaryAccountTrie(t, rawdb.PathScheme)
   968  }
   969  
   970  func testSyncBoundaryAccountTrie(t *testing.T, scheme string) {
   971  	var (
   972  		once   sync.Once
   973  		cancel = make(chan struct{})
   974  		term   = func() {
   975  			once.Do(func() {
   976  				close(cancel)
   977  			})
   978  		}
   979  	)
   980  	nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000)
   981  
   982  	mkSource := func(name string) *testPeer {
   983  		source := newTestPeer(name, t, term)
   984  		source.accountTrie = sourceAccountTrie.Copy()
   985  		source.accountValues = elems
   986  		return source
   987  	}
   988  	syncer := setupSyncer(
   989  		nodeScheme,
   990  		mkSource("peer-a"),
   991  		mkSource("peer-b"),
   992  	)
   993  	done := checkStall(t, term)
   994  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   995  		t.Fatalf("sync failed: %v", err)
   996  	}
   997  	close(done)
   998  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   999  }
  1000  
  1001  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
  1002  // consistently returning very small results
  1003  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
  1004  	t.Parallel()
  1005  
  1006  	testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1007  	testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1008  }
  1009  
  1010  func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1011  	var (
  1012  		once   sync.Once
  1013  		cancel = make(chan struct{})
  1014  		term   = func() {
  1015  			once.Do(func() {
  1016  				close(cancel)
  1017  			})
  1018  		}
  1019  	)
  1020  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1021  
  1022  	mkSource := func(name string, slow bool) *testPeer {
  1023  		source := newTestPeer(name, t, term)
  1024  		source.accountTrie = sourceAccountTrie.Copy()
  1025  		source.accountValues = elems
  1026  
  1027  		if slow {
  1028  			source.accountRequestHandler = starvingAccountRequestHandler
  1029  		}
  1030  		return source
  1031  	}
  1032  
  1033  	syncer := setupSyncer(
  1034  		nodeScheme,
  1035  		mkSource("nice-a", false),
  1036  		mkSource("nice-b", false),
  1037  		mkSource("nice-c", false),
  1038  		mkSource("capped", true),
  1039  	)
  1040  	done := checkStall(t, term)
  1041  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1042  		t.Fatalf("sync failed: %v", err)
  1043  	}
  1044  	close(done)
  1045  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1046  }
  1047  
  1048  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
  1049  // code requests properly.
  1050  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
  1051  	t.Parallel()
  1052  
  1053  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme)
  1054  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme)
  1055  }
  1056  
  1057  func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) {
  1058  	var (
  1059  		once   sync.Once
  1060  		cancel = make(chan struct{})
  1061  		term   = func() {
  1062  			once.Do(func() {
  1063  				close(cancel)
  1064  			})
  1065  		}
  1066  	)
  1067  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1068  
  1069  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1070  		source := newTestPeer(name, t, term)
  1071  		source.accountTrie = sourceAccountTrie.Copy()
  1072  		source.accountValues = elems
  1073  		source.codeRequestHandler = codeFn
  1074  		return source
  1075  	}
  1076  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1077  	// chance that the full set of codes requested is sent only to the
  1078  	// non-corrupt peer, which delivers everything in one go, and makes the
  1079  	// test moot
  1080  	syncer := setupSyncer(
  1081  		nodeScheme,
  1082  		mkSource("capped", cappedCodeRequestHandler),
  1083  		mkSource("corrupt", corruptCodeRequestHandler),
  1084  	)
  1085  	done := checkStall(t, term)
  1086  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1087  		t.Fatalf("sync failed: %v", err)
  1088  	}
  1089  	close(done)
  1090  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1091  }
  1092  
  1093  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1094  	t.Parallel()
  1095  
  1096  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme)
  1097  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme)
  1098  }
  1099  
  1100  func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
  1101  	var (
  1102  		once   sync.Once
  1103  		cancel = make(chan struct{})
  1104  		term   = func() {
  1105  			once.Do(func() {
  1106  				close(cancel)
  1107  			})
  1108  		}
  1109  	)
  1110  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1111  
  1112  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1113  		source := newTestPeer(name, t, term)
  1114  		source.accountTrie = sourceAccountTrie.Copy()
  1115  		source.accountValues = elems
  1116  		source.accountRequestHandler = accFn
  1117  		return source
  1118  	}
  1119  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1120  	// chance that the full set of accounts requested is sent only to the
  1121  	// non-corrupt peer, which delivers everything in one go, and makes the
  1122  	// test moot
  1123  	syncer := setupSyncer(
  1124  		nodeScheme,
  1125  		mkSource("capped", defaultAccountRequestHandler),
  1126  		mkSource("corrupt", corruptAccountRequestHandler),
  1127  	)
  1128  	done := checkStall(t, term)
  1129  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1130  		t.Fatalf("sync failed: %v", err)
  1131  	}
  1132  	close(done)
  1133  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1134  }
  1135  
  1136  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
  1137  // one by one
  1138  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1139  	t.Parallel()
  1140  
  1141  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme)
  1142  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme)
  1143  }
  1144  
  1145  func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
  1146  	var (
  1147  		once   sync.Once
  1148  		cancel = make(chan struct{})
  1149  		term   = func() {
  1150  			once.Do(func() {
  1151  				close(cancel)
  1152  			})
  1153  		}
  1154  	)
  1155  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1156  
  1157  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1158  		source := newTestPeer(name, t, term)
  1159  		source.accountTrie = sourceAccountTrie.Copy()
  1160  		source.accountValues = elems
  1161  		source.codeRequestHandler = codeFn
  1162  		return source
  1163  	}
  1164  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1165  	// so it shouldn't be more than that
  1166  	var counter int
  1167  	syncer := setupSyncer(
  1168  		nodeScheme,
  1169  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1170  			counter++
  1171  			return cappedCodeRequestHandler(t, id, hashes, max)
  1172  		}),
  1173  	)
  1174  	done := checkStall(t, term)
  1175  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1176  		t.Fatalf("sync failed: %v", err)
  1177  	}
  1178  	close(done)
  1179  
  1180  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1181  	// deduplication is per request batch. If it were a perfect global dedup,
  1182  	// we would expect only 8 requests. If there were no dedup, there would be
  1183  	// 3k requests.
  1184  	// We expect somewhere below 100 requests for these 8 unique hashes. But
  1185  	// the number can be flaky, so don't limit it so strictly.
  1186  	if threshold := 100; counter > threshold {
  1187  		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
  1188  	}
  1189  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1190  }
  1191  
  1192  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1193  // storage trie has a few boundary elements.
  1194  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1195  	t.Parallel()
  1196  
  1197  	testSyncBoundaryStorageTrie(t, rawdb.HashScheme)
  1198  	testSyncBoundaryStorageTrie(t, rawdb.PathScheme)
  1199  }
  1200  
  1201  func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
  1202  	var (
  1203  		once   sync.Once
  1204  		cancel = make(chan struct{})
  1205  		term   = func() {
  1206  			once.Do(func() {
  1207  				close(cancel)
  1208  			})
  1209  		}
  1210  	)
  1211  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false)
  1212  
  1213  	mkSource := func(name string) *testPeer {
  1214  		source := newTestPeer(name, t, term)
  1215  		source.accountTrie = sourceAccountTrie.Copy()
  1216  		source.accountValues = elems
  1217  		source.setStorageTries(storageTries)
  1218  		source.storageValues = storageElems
  1219  		return source
  1220  	}
  1221  	syncer := setupSyncer(
  1222  		scheme,
  1223  		mkSource("peer-a"),
  1224  		mkSource("peer-b"),
  1225  	)
  1226  	done := checkStall(t, term)
  1227  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1228  		t.Fatalf("sync failed: %v", err)
  1229  	}
  1230  	close(done)
  1231  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1232  }
  1233  
  1234  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1235  // consistently returning very small results
  1236  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1237  	t.Parallel()
  1238  
  1239  	testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1240  	testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1241  }
  1242  
  1243  func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1244  	var (
  1245  		once   sync.Once
  1246  		cancel = make(chan struct{})
  1247  		term   = func() {
  1248  			once.Do(func() {
  1249  				close(cancel)
  1250  			})
  1251  		}
  1252  	)
  1253  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false)
  1254  
  1255  	mkSource := func(name string, slow bool) *testPeer {
  1256  		source := newTestPeer(name, t, term)
  1257  		source.accountTrie = sourceAccountTrie.Copy()
  1258  		source.accountValues = elems
  1259  		source.setStorageTries(storageTries)
  1260  		source.storageValues = storageElems
  1261  
  1262  		if slow {
  1263  			source.storageRequestHandler = starvingStorageRequestHandler
  1264  		}
  1265  		return source
  1266  	}
  1267  
  1268  	syncer := setupSyncer(
  1269  		scheme,
  1270  		mkSource("nice-a", false),
  1271  		mkSource("slow", true),
  1272  	)
  1273  	done := checkStall(t, term)
  1274  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1275  		t.Fatalf("sync failed: %v", err)
  1276  	}
  1277  	close(done)
  1278  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1279  }
  1280  
  1281  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1282  // sometimes sending bad proofs
  1283  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1284  	t.Parallel()
  1285  
  1286  	testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme)
  1287  	testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme)
  1288  }
  1289  
  1290  func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
  1291  	var (
  1292  		once   sync.Once
  1293  		cancel = make(chan struct{})
  1294  		term   = func() {
  1295  			once.Do(func() {
  1296  				close(cancel)
  1297  			})
  1298  		}
  1299  	)
  1300  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
  1301  
  1302  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1303  		source := newTestPeer(name, t, term)
  1304  		source.accountTrie = sourceAccountTrie.Copy()
  1305  		source.accountValues = elems
  1306  		source.setStorageTries(storageTries)
  1307  		source.storageValues = storageElems
  1308  		source.storageRequestHandler = handler
  1309  		return source
  1310  	}
  1311  
  1312  	syncer := setupSyncer(
  1313  		scheme,
  1314  		mkSource("nice-a", defaultStorageRequestHandler),
  1315  		mkSource("nice-b", defaultStorageRequestHandler),
  1316  		mkSource("nice-c", defaultStorageRequestHandler),
  1317  		mkSource("corrupt", corruptStorageRequestHandler),
  1318  	)
  1319  	done := checkStall(t, term)
  1320  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1321  		t.Fatalf("sync failed: %v", err)
  1322  	}
  1323  	close(done)
  1324  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1325  }
  1326  
  1327  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1328  	t.Parallel()
  1329  
  1330  	testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme)
  1331  	testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme)
  1332  }
  1333  
  1334  func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
  1335  	var (
  1336  		once   sync.Once
  1337  		cancel = make(chan struct{})
  1338  		term   = func() {
  1339  			once.Do(func() {
  1340  				close(cancel)
  1341  			})
  1342  		}
  1343  	)
  1344  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
  1345  
  1346  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1347  		source := newTestPeer(name, t, term)
  1348  		source.accountTrie = sourceAccountTrie.Copy()
  1349  		source.accountValues = elems
  1350  		source.setStorageTries(storageTries)
  1351  		source.storageValues = storageElems
  1352  		source.storageRequestHandler = handler
  1353  		return source
  1354  	}
  1355  	syncer := setupSyncer(
  1356  		scheme,
  1357  		mkSource("nice-a", defaultStorageRequestHandler),
  1358  		mkSource("nice-b", defaultStorageRequestHandler),
  1359  		mkSource("nice-c", defaultStorageRequestHandler),
  1360  		mkSource("corrupt", noProofStorageRequestHandler),
  1361  	)
  1362  	done := checkStall(t, term)
  1363  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1364  		t.Fatalf("sync failed: %v", err)
  1365  	}
  1366  	close(done)
  1367  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1368  }
  1369  
  1370  // TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
  1371  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1372  // an error, where the recipient erroneously clipped the boundary nodes, but
  1373  // did not mark the account for healing.
  1374  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1375  	t.Parallel()
  1376  
  1377  	testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme)
  1378  	testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme)
  1379  }
  1380  
  1381  func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
  1382  	var (
  1383  		once   sync.Once
  1384  		cancel = make(chan struct{})
  1385  		term   = func() {
  1386  			once.Do(func() {
  1387  				close(cancel)
  1388  			})
  1389  		}
  1390  	)
  1391  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false)
  1392  
  1393  	mkSource := func(name string) *testPeer {
  1394  		source := newTestPeer(name, t, term)
  1395  		source.accountTrie = sourceAccountTrie.Copy()
  1396  		source.accountValues = elems
  1397  		source.setStorageTries(storageTries)
  1398  		source.storageValues = storageElems
  1399  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1400  		return source
  1401  	}
  1402  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
  1403  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1404  		t.Fatalf("sync failed: %v", err)
  1405  	}
  1406  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1407  }
  1408  
  1409  // TestSyncWithUnevenStorage tests sync where the storage trie is uneven
  1410  // and contains a few empty ranges.
  1411  func TestSyncWithUnevenStorage(t *testing.T) {
  1412  	t.Parallel()
  1413  
  1414  	testSyncWithUnevenStorage(t, rawdb.HashScheme)
  1415  	testSyncWithUnevenStorage(t, rawdb.PathScheme)
  1416  }
  1417  
  1418  func testSyncWithUnevenStorage(t *testing.T, scheme string) {
  1419  	var (
  1420  		once   sync.Once
  1421  		cancel = make(chan struct{})
  1422  		term   = func() {
  1423  			once.Do(func() {
  1424  				close(cancel)
  1425  			})
  1426  		}
  1427  	)
  1428  	accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true)
  1429  
  1430  	mkSource := func(name string) *testPeer {
  1431  		source := newTestPeer(name, t, term)
  1432  		source.accountTrie = accountTrie.Copy()
  1433  		source.accountValues = accounts
  1434  		source.setStorageTries(storageTries)
  1435  		source.storageValues = storageElems
  1436  		source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
  1437  			return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode
  1438  		}
  1439  		return source
  1440  	}
  1441  	syncer := setupSyncer(scheme, mkSource("source"))
  1442  	if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil {
  1443  		t.Fatalf("sync failed: %v", err)
  1444  	}
  1445  	verifyTrie(scheme, syncer.db, accountTrie.Hash(), t)
  1446  }
  1447  
  1448  type kv struct {
  1449  	k, v []byte
  1450  }
  1451  
  1452  func (k *kv) cmp(other *kv) int {
  1453  	return bytes.Compare(k.k, other.k)
  1454  }
  1455  
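        // key32 returns a 32-byte key with i encoded little-endian in its first
        // 8 bytes; e.g. key32(1) is 0x01 followed by 31 zero bytes.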
  1456  func key32(i uint64) []byte {
  1457  	key := make([]byte, 32)
  1458  	binary.LittleEndian.PutUint64(key, i)
  1459  	return key
  1460  }
  1461  
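        // codehashes is a fixed set of code hashes (the hashes of the one-byte
        // programs 0x00..0x07), used to assign deterministic but varied bytecode
        // to the generated test accounts.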
  1462  var (
  1463  	codehashes = []common.Hash{
  1464  		crypto.Keccak256Hash([]byte{0}),
  1465  		crypto.Keccak256Hash([]byte{1}),
  1466  		crypto.Keccak256Hash([]byte{2}),
  1467  		crypto.Keccak256Hash([]byte{3}),
  1468  		crypto.Keccak256Hash([]byte{4}),
  1469  		crypto.Keccak256Hash([]byte{5}),
  1470  		crypto.Keccak256Hash([]byte{6}),
  1471  		crypto.Keccak256Hash([]byte{7}),
  1472  	}
  1473  )
  1474  
  1475  // getCodeHash returns a code hash for account i, picked deterministically from the codehashes set
  1476  func getCodeHash(i uint64) []byte {
  1477  	h := codehashes[int(i)%len(codehashes)]
  1478  	return common.CopyBytes(h[:])
  1479  }
  1480  
  1481  // getCodeByHash is a convenience function to look up the code corresponding to a code hash
  1482  func getCodeByHash(hash common.Hash) []byte {
  1483  	if hash == types.EmptyCodeHash {
  1484  		return nil
  1485  	}
  1486  	for i, h := range codehashes {
  1487  		if h == hash {
  1488  			return []byte{byte(i)}
  1489  		}
  1490  	}
  1491  	return nil
  1492  }
  1493  
  1494  // makeAccountTrieNoStorage constructs an account trie without any storage, along with its sorted leaves
  1495  func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) {
  1496  	var (
  1497  		db      = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1498  		accTrie = trie.NewEmpty(db)
  1499  		entries []*kv
  1500  	)
  1501  	for i := uint64(1); i <= uint64(n); i++ {
  1502  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1503  			Nonce:    i,
  1504  			Balance:  uint256.NewInt(i),
  1505  			Root:     types.EmptyRootHash,
  1506  			CodeHash: getCodeHash(i),
  1507  		})
  1508  		key := key32(i)
  1509  		elem := &kv{key, value}
  1510  		accTrie.MustUpdate(elem.k, elem.v)
  1511  		entries = append(entries, elem)
  1512  	}
  1513  	slices.SortFunc(entries, (*kv).cmp)
  1514  
  1515  	// Commit the state changes into db and re-create the trie
  1516  	// for accessing later.
  1517  	root, nodes := accTrie.Commit(false)
  1518  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), triedb.NewStateSet())
  1519  
  1520  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1521  	return db.Scheme(), accTrie, entries
  1522  }
  1523  
  1524  // makeBoundaryAccountTrie constructs an account trie. Instead of only filling
  1525  // accounts normally, this function also fills a few accounts whose keys sit
  1526  // exactly on the boundary hashes of the concurrent account-sync ranges.
  1527  func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
  1528  	var (
  1529  		entries    []*kv
  1530  		boundaries []common.Hash
  1531  
  1532  		db      = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1533  		accTrie = trie.NewEmpty(db)
  1534  	)
  1535  	// Initialize boundaries
  1536  	var next common.Hash
  1537  	step := new(big.Int).Sub(
  1538  		new(big.Int).Div(
  1539  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1540  			big.NewInt(int64(accountConcurrency)),
  1541  		), common.Big1,
  1542  	)
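        	// The 256-bit key space is split into accountConcurrency equal chunks and
        	// 'step' is the chunk width minus one. As a sketch (assuming, say, 16 fetchers):
        	// step = 2^256/16 - 1 = 0x0fff...ff, so the boundaries produced below are
        	// 0x0fff...ff, 0x1fff...ff, ..., with the final one clamped to common.MaxHash.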
  1543  	for i := 0; i < accountConcurrency; i++ {
  1544  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1545  		if i == accountConcurrency-1 {
  1546  			last = common.MaxHash
  1547  		}
  1548  		boundaries = append(boundaries, last)
  1549  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1550  	}
  1551  	// Fill boundary accounts
  1552  	for i := 0; i < len(boundaries); i++ {
  1553  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1554  			Nonce:    uint64(0),
  1555  			Balance:  uint256.NewInt(uint64(i)),
  1556  			Root:     types.EmptyRootHash,
  1557  			CodeHash: getCodeHash(uint64(i)),
  1558  		})
  1559  		elem := &kv{boundaries[i].Bytes(), value}
  1560  		accTrie.MustUpdate(elem.k, elem.v)
  1561  		entries = append(entries, elem)
  1562  	}
  1563  	// Fill other accounts if required
  1564  	for i := uint64(1); i <= uint64(n); i++ {
  1565  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1566  			Nonce:    i,
  1567  			Balance:  uint256.NewInt(i),
  1568  			Root:     types.EmptyRootHash,
  1569  			CodeHash: getCodeHash(i),
  1570  		})
  1571  		elem := &kv{key32(i), value}
  1572  		accTrie.MustUpdate(elem.k, elem.v)
  1573  		entries = append(entries, elem)
  1574  	}
  1575  	slices.SortFunc(entries, (*kv).cmp)
  1576  
  1577  	// Commit the state changes into db and re-create the trie
  1578  	// for accessing later.
  1579  	root, nodes := accTrie.Commit(false)
  1580  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), triedb.NewStateSet())
  1581  
  1582  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1583  	return db.Scheme(), accTrie, entries
  1584  }
  1585  
  1586  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1587  // has a unique storage set.
  1588  func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1589  	var (
  1590  		db             = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1591  		accTrie        = trie.NewEmpty(db)
  1592  		entries        []*kv
  1593  		storageRoots   = make(map[common.Hash]common.Hash)
  1594  		storageTries   = make(map[common.Hash]*trie.Trie)
  1595  		storageEntries = make(map[common.Hash][]*kv)
  1596  		nodes          = trienode.NewMergedNodeSet()
  1597  	)
  1598  	// Create n accounts in the trie
  1599  	for i := uint64(1); i <= uint64(accounts); i++ {
  1600  		key := key32(i)
  1601  		codehash := types.EmptyCodeHash.Bytes()
  1602  		if code {
  1603  			codehash = getCodeHash(i)
  1604  		}
  1605  		// Create a storage trie, seeded with the account index so every account gets a unique storage set
  1606  		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
  1607  		nodes.Merge(stNodes)
  1608  
  1609  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1610  			Nonce:    i,
  1611  			Balance:  uint256.NewInt(i),
  1612  			Root:     stRoot,
  1613  			CodeHash: codehash,
  1614  		})
  1615  		elem := &kv{key, value}
  1616  		accTrie.MustUpdate(elem.k, elem.v)
  1617  		entries = append(entries, elem)
  1618  
  1619  		storageRoots[common.BytesToHash(key)] = stRoot
  1620  		storageEntries[common.BytesToHash(key)] = stEntries
  1621  	}
  1622  	slices.SortFunc(entries, (*kv).cmp)
  1623  
  1624  	// Commit account trie
  1625  	root, set := accTrie.Commit(true)
  1626  	nodes.Merge(set)
  1627  
  1628  	// Commit gathered dirty nodes into database
  1629  	db.Update(root, types.EmptyRootHash, 0, nodes, triedb.NewStateSet())
  1630  
  1631  	// Re-create tries with new root
  1632  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1633  	for i := uint64(1); i <= uint64(accounts); i++ {
  1634  		key := key32(i)
  1635  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1636  		trie, _ := trie.New(id, db)
  1637  		storageTries[common.BytesToHash(key)] = trie
  1638  	}
  1639  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1640  }
  1641  
  1642  // makeAccountTrieWithStorage constructs an account trie with per-account storage tries, along with the sorted leaves
  1643  func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1644  	var (
  1645  		db             = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1646  		accTrie        = trie.NewEmpty(db)
  1647  		entries        []*kv
  1648  		storageRoots   = make(map[common.Hash]common.Hash)
  1649  		storageTries   = make(map[common.Hash]*trie.Trie)
  1650  		storageEntries = make(map[common.Hash][]*kv)
  1651  		nodes          = trienode.NewMergedNodeSet()
  1652  	)
  1653  	// Create n accounts in the trie
  1654  	for i := uint64(1); i <= uint64(accounts); i++ {
  1655  		key := key32(i)
  1656  		codehash := types.EmptyCodeHash.Bytes()
  1657  		if code {
  1658  			codehash = getCodeHash(i)
  1659  		}
  1660  		// Make a storage trie
  1661  		var (
  1662  			stRoot    common.Hash
  1663  			stNodes   *trienode.NodeSet
  1664  			stEntries []*kv
  1665  		)
  1666  		if boundary {
  1667  			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
  1668  		} else if uneven {
  1669  			stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db)
  1670  		} else {
  1671  			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
  1672  		}
  1673  		nodes.Merge(stNodes)
  1674  
  1675  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1676  			Nonce:    i,
  1677  			Balance:  uint256.NewInt(i),
  1678  			Root:     stRoot,
  1679  			CodeHash: codehash,
  1680  		})
  1681  		elem := &kv{key, value}
  1682  		accTrie.MustUpdate(elem.k, elem.v)
  1683  		entries = append(entries, elem)
  1684  
  1685  		// the same storage set is reused for every account (except in the uneven case, which is randomized)
  1686  		storageRoots[common.BytesToHash(key)] = stRoot
  1687  		storageEntries[common.BytesToHash(key)] = stEntries
  1688  	}
  1689  	slices.SortFunc(entries, (*kv).cmp)
  1690  
  1691  	// Commit account trie
  1692  	root, set := accTrie.Commit(true)
  1693  	nodes.Merge(set)
  1694  
  1695  	// Commit gathered dirty nodes into database
  1696  	db.Update(root, types.EmptyRootHash, 0, nodes, triedb.NewStateSet())
  1697  
  1698  	// Re-create tries with new root
  1699  	accTrie, err := trie.New(trie.StateTrieID(root), db)
  1700  	if err != nil {
  1701  		panic(err)
  1702  	}
  1703  	for i := uint64(1); i <= uint64(accounts); i++ {
  1704  		key := key32(i)
  1705  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1706  		trie, err := trie.New(id, db)
  1707  		if err != nil {
  1708  			panic(err)
  1709  		}
  1710  		storageTries[common.BytesToHash(key)] = trie
  1711  	}
  1712  	return accTrie, entries, storageTries, storageEntries
  1713  }
  1714  
  1715  // makeStorageTrieWithSeed fills a storage trie with n items and commits it, returning
  1716  // the root, the dirty trie nodes and the sorted entries. The seed can be used to ensure
  1717  // that the tries are unique.
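        // As a sketch of how the seed differentiates tries: for seed s, the slot keyed
        // by keccak(key32(i)) holds (the RLP of) key32(i+s), so tries built with
        // different seeds contain different values and therefore have different roots.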
  1718  func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1719  	trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1720  	var entries []*kv
  1721  	for i := uint64(1); i <= n; i++ {
  1722  		// store 'x + seed' at slot 'x'
  1723  		slotValue := key32(i + seed)
  1724  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1725  
  1726  		slotKey := key32(i)
  1727  		key := crypto.Keccak256Hash(slotKey[:])
  1728  
  1729  		elem := &kv{key[:], rlpSlotValue}
  1730  		trie.MustUpdate(elem.k, elem.v)
  1731  		entries = append(entries, elem)
  1732  	}
  1733  	slices.SortFunc(entries, (*kv).cmp)
  1734  	root, nodes := trie.Commit(false)
  1735  	return root, nodes, entries
  1736  }
  1737  
  1738  // makeBoundaryStorageTrie constructs a storage trie. Instead of only filling
  1739  // storage slots normally, this function also fills a few slots whose keys sit
  1740  // exactly on the boundary hashes of the concurrent sync ranges.
  1741  func makeBoundaryStorageTrie(owner common.Hash, n int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1742  	var (
  1743  		entries    []*kv
  1744  		boundaries []common.Hash
  1745  		trie, _    = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1746  	)
  1747  	// Initialize boundaries
  1748  	var next common.Hash
  1749  	step := new(big.Int).Sub(
  1750  		new(big.Int).Div(
  1751  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1752  			big.NewInt(int64(accountConcurrency)),
  1753  		), common.Big1,
  1754  	)
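        	// Same chunking as in makeBoundaryAccountTrie: the key space is split into
        	// accountConcurrency equal ranges and the last hash of each range is a boundary.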
  1755  	for i := 0; i < accountConcurrency; i++ {
  1756  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1757  		if i == accountConcurrency-1 {
  1758  			last = common.MaxHash
  1759  		}
  1760  		boundaries = append(boundaries, last)
  1761  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1762  	}
  1763  	// Fill boundary slots
  1764  	for i := 0; i < len(boundaries); i++ {
  1765  		key := boundaries[i]
  1766  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1767  
  1768  		elem := &kv{key[:], val}
  1769  		trie.MustUpdate(elem.k, elem.v)
  1770  		entries = append(entries, elem)
  1771  	}
  1772  	// Fill other slots if required
  1773  	for i := uint64(1); i <= uint64(n); i++ {
  1774  		slotKey := key32(i)
  1775  		key := crypto.Keccak256Hash(slotKey[:])
  1776  
  1777  		slotValue := key32(i)
  1778  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1779  
  1780  		elem := &kv{key[:], rlpSlotValue}
  1781  		trie.MustUpdate(elem.k, elem.v)
  1782  		entries = append(entries, elem)
  1783  	}
  1784  	slices.SortFunc(entries, (*kv).cmp)
  1785  	root, nodes := trie.Commit(false)
  1786  	return root, nodes, entries
  1787  }
  1788  
  1789  // makeUnevenStorageTrie constructs a storage trie with its states distributed
  1790  // unevenly across different key ranges.
  1791  func makeUnevenStorageTrie(owner common.Hash, slots int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1792  	var (
  1793  		entries []*kv
  1794  		tr, _   = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1795  		chosen  = make(map[byte]struct{})
  1796  	)
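        	// Pick three distinct leading bytes in [0, 14] and place slots/3 random
        	// entries under each, so the states cluster into a few narrow key ranges
        	// while the rest of the key space (including the last range) stays empty.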
  1797  	for i := 0; i < 3; i++ {
  1798  		var n int
  1799  		for {
  1800  			n = mrand.Intn(15) // the last range is set empty deliberately
  1801  			if _, ok := chosen[byte(n)]; ok {
  1802  				continue
  1803  			}
  1804  			chosen[byte(n)] = struct{}{}
  1805  			break
  1806  		}
  1807  		for j := 0; j < slots/3; j++ {
  1808  			key := append([]byte{byte(n)}, testrand.Bytes(31)...)
  1809  			val, _ := rlp.EncodeToBytes(testrand.Bytes(32))
  1810  
  1811  			elem := &kv{key, val}
  1812  			tr.MustUpdate(elem.k, elem.v)
  1813  			entries = append(entries, elem)
  1814  		}
  1815  	}
  1816  	slices.SortFunc(entries, (*kv).cmp)
  1817  	root, nodes := tr.Commit(false)
  1818  	return root, nodes, entries
  1819  }
  1820  
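        // verifyTrie iterates the synced account trie rooted at root (and every
        // referenced storage trie) from db, aborting if any node is missing or any
        // account value fails to decode.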
  1821  func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1822  	t.Helper()
  1823  	triedb := triedb.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
  1824  	accTrie, err := trie.New(trie.StateTrieID(root), triedb)
  1825  	if err != nil {
  1826  		t.Fatal(err)
  1827  	}
  1828  	accounts, slots := 0, 0
  1829  	accIt := trie.NewIterator(accTrie.MustNodeIterator(nil))
  1830  	for accIt.Next() {
  1831  		var acc struct {
  1832  			Nonce    uint64
  1833  			Balance  *big.Int
  1834  			Root     common.Hash
  1835  			CodeHash []byte
  1836  		}
  1837  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1838  			log.Crit("Invalid account encountered during snapshot creation", "err", err)
  1839  		}
  1840  		accounts++
  1841  		if acc.Root != types.EmptyRootHash {
  1842  			id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
  1843  			storeTrie, err := trie.NewStateTrie(id, triedb)
  1844  			if err != nil {
  1845  				t.Fatal(err)
  1846  			}
  1847  			storeIt := trie.NewIterator(storeTrie.MustNodeIterator(nil))
  1848  			for storeIt.Next() {
  1849  				slots++
  1850  			}
  1851  			if err := storeIt.Err; err != nil {
  1852  				t.Fatal(err)
  1853  			}
  1854  		}
  1855  	}
  1856  	if err := accIt.Err; err != nil {
  1857  		t.Fatal(err)
  1858  	}
  1859  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1860  }
  1861  
  1862  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1863  // minimizing state healing.
  1864  func TestSyncAccountPerformance(t *testing.T) {
  1865  	// These tests must not run in parallel: they modify the
  1866  	// global var accountConcurrency
  1867  	// t.Parallel()
  1868  	testSyncAccountPerformance(t, rawdb.HashScheme)
  1869  	testSyncAccountPerformance(t, rawdb.PathScheme)
  1870  }
  1871  
  1872  func testSyncAccountPerformance(t *testing.T, scheme string) {
  1873  	// Set the account concurrency to 1. This _should_ result in the
  1874  	// range root becoming correct, so no healing should be needed.
  1875  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1876  	accountConcurrency = 1
  1877  
  1878  	var (
  1879  		once   sync.Once
  1880  		cancel = make(chan struct{})
  1881  		term   = func() {
  1882  			once.Do(func() {
  1883  				close(cancel)
  1884  			})
  1885  		}
  1886  	)
  1887  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
  1888  
  1889  	mkSource := func(name string) *testPeer {
  1890  		source := newTestPeer(name, t, term)
  1891  		source.accountTrie = sourceAccountTrie.Copy()
  1892  		source.accountValues = elems
  1893  		return source
  1894  	}
  1895  	src := mkSource("source")
  1896  	syncer := setupSyncer(nodeScheme, src)
  1897  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1898  		t.Fatalf("sync failed: %v", err)
  1899  	}
  1900  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1901  	// The trie root will always be requested, since it is added when the snap
  1902  	// sync cycle starts. When popping the queue, we do not look it up again.
  1903  	// Doing so would bring this number down to zero in this artificial testcase,
  1904  	// but only add extra IO for no reason in practice.
  1905  	if have, want := src.nTrienodeRequests, 1; have != want {
  1906  		fmt.Print(src.Stats())
  1907  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1908  	}
  1909  }
  1910  
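        // TestSlotEstimation exercises estimateRemainingSlots with hand-computed
        // expectations. The expected values follow from linear extrapolation over the
        // 256-bit key space (a sketch of the assumed arithmetic, not necessarily the
        // exact implementation): if count slots cover [0, last], the total is roughly
        // count * 2^256 / (last+1) and the remainder is that total minus count. E.g.
        // 100 slots covering 1/16th of the space extrapolate to ~1600 in total, i.e.
        // ~1500 remaining; covering half the space gives ~200 total, i.e. 100 remaining.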
  1911  func TestSlotEstimation(t *testing.T) {
  1912  	for i, tc := range []struct {
  1913  		last  common.Hash
  1914  		count int
  1915  		want  uint64
  1916  	}{
  1917  		{
  1918  			// Half the space
  1919  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1920  			100,
  1921  			100,
  1922  		},
  1923  		{
  1924  			// 1 / 16th
  1925  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1926  			100,
  1927  			1500,
  1928  		},
  1929  		{
  1930  			// Bit more than 1 / 16th
  1931  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1932  			100,
  1933  			1499,
  1934  		},
  1935  		{
  1936  			// Almost everything
  1937  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1938  			100,
  1939  			6,
  1940  		},
  1941  		{
  1942  			// Almost nothing -- should lead to error
  1943  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1944  			1,
  1945  			0,
  1946  		},
  1947  		{
  1948  			// Nothing -- should lead to error
  1949  			common.Hash{},
  1950  			100,
  1951  			0,
  1952  		},
  1953  	} {
  1954  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1955  		if want := tc.want; have != want {
  1956  			t.Errorf("test %d: have %d want %d", i, have, want)
  1957  		}
  1958  	}
  1959  }
  1960  
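        // newDbConfig returns the trie database configuration for the given state
        // scheme; the path scheme variant disables snapshot building.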
  1961  func newDbConfig(scheme string) *triedb.Config {
  1962  	if scheme == rawdb.HashScheme {
  1963  		return &triedb.Config{}
  1964  	}
  1965  	return &triedb.Config{PathDB: &pathdb.Config{SnapshotNoBuild: true}}
  1966  }