github.com/theQRL/go-zond@v0.1.1/zond/protocols/snap/sync_test.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sync"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/theQRL/go-zond/common"
    30  	"github.com/theQRL/go-zond/core/rawdb"
    31  	"github.com/theQRL/go-zond/core/types"
    32  	"github.com/theQRL/go-zond/crypto"
    33  	"github.com/theQRL/go-zond/light"
    34  	"github.com/theQRL/go-zond/log"
    35  	"github.com/theQRL/go-zond/rlp"
    36  	"github.com/theQRL/go-zond/trie"
    37  	"github.com/theQRL/go-zond/trie/triedb/pathdb"
    38  	"github.com/theQRL/go-zond/trie/trienode"
    39  	"github.com/theQRL/go-zond/zonddb"
    40  	"golang.org/x/crypto/sha3"
    41  	"golang.org/x/exp/slices"
    42  )
    43  
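        // TestHashing checks that hashing bytecode via crypto.KeccakState.Read yields
        // the same digests as the classic hasher.Sum(nil) code path.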
    44  func TestHashing(t *testing.T) {
    45  	t.Parallel()
    46  
    47  	var bytecodes = make([][]byte, 10)
    48  	for i := 0; i < len(bytecodes); i++ {
    49  		buf := make([]byte, 100)
    50  		rand.Read(buf)
    51  		bytecodes[i] = buf
    52  	}
    53  	var want, got string
    54  	var old = func() {
    55  		hasher := sha3.NewLegacyKeccak256()
    56  		for i := 0; i < len(bytecodes); i++ {
    57  			hasher.Reset()
    58  			hasher.Write(bytecodes[i])
    59  			hash := hasher.Sum(nil)
    60  			got = fmt.Sprintf("%v\n%v", got, hash)
    61  		}
    62  	}
    63  	var new = func() {
    64  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    65  		var hash = make([]byte, 32)
    66  		for i := 0; i < len(bytecodes); i++ {
    67  			hasher.Reset()
    68  			hasher.Write(bytecodes[i])
    69  			hasher.Read(hash)
    70  			want = fmt.Sprintf("%v\n%v", want, hash)
    71  		}
    72  	}
    73  	old()
    74  	new()
    75  	if want != got {
    76  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    77  	}
    78  }
    79  
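        // BenchmarkHashing compares the speed and allocation behaviour of the
        // Sum(nil) and KeccakState.Read hashing code paths.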
    80  func BenchmarkHashing(b *testing.B) {
    81  	var bytecodes = make([][]byte, 10000)
    82  	for i := 0; i < len(bytecodes); i++ {
    83  		buf := make([]byte, 100)
    84  		rand.Read(buf)
    85  		bytecodes[i] = buf
    86  	}
    87  	var old = func() {
    88  		hasher := sha3.NewLegacyKeccak256()
    89  		for i := 0; i < len(bytecodes); i++ {
    90  			hasher.Reset()
    91  			hasher.Write(bytecodes[i])
    92  			hasher.Sum(nil)
    93  		}
    94  	}
    95  	var new = func() {
    96  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    97  		var hash = make([]byte, 32)
    98  		for i := 0; i < len(bytecodes); i++ {
    99  			hasher.Reset()
   100  			hasher.Write(bytecodes[i])
   101  			hasher.Read(hash)
   102  		}
   103  	}
   104  	b.Run("old", func(b *testing.B) {
   105  		b.ReportAllocs()
   106  		for i := 0; i < b.N; i++ {
   107  			old()
   108  		}
   109  	})
   110  	b.Run("new", func(b *testing.B) {
   111  		b.ReportAllocs()
   112  		for i := 0; i < b.N; i++ {
   113  			new()
   114  		}
   115  	})
   116  }
   117  
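        // The handler function types below allow individual tests to override how a
        // testPeer responds to each kind of snap request.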
   118  type (
   119  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   120  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   121  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   122  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   123  )
   124  
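        // testPeer is an in-memory peer that serves account, storage, trie node and
        // bytecode requests from locally held test tries, with per-request-type
        // handlers that tests can swap out to simulate misbehaviour.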
   125  type testPeer struct {
   126  	id            string
   127  	test          *testing.T
   128  	remote        *Syncer
   129  	logger        log.Logger
   130  	accountTrie   *trie.Trie
   131  	accountValues []*kv
   132  	storageTries  map[common.Hash]*trie.Trie
   133  	storageValues map[common.Hash][]*kv
   134  
   135  	accountRequestHandler accountHandlerFunc
   136  	storageRequestHandler storageHandlerFunc
   137  	trieRequestHandler    trieHandlerFunc
   138  	codeRequestHandler    codeHandlerFunc
   139  	term                  func()
   140  
   141  	// counters
   142  	nAccountRequests  int
   143  	nStorageRequests  int
   144  	nBytecodeRequests int
   145  	nTrienodeRequests int
   146  }
   147  
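        // newTestPeer creates a test peer wired up with the default, well-behaving
        // request handlers.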
   148  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   149  	peer := &testPeer{
   150  		id:                    id,
   151  		test:                  t,
   152  		logger:                log.New("id", id),
   153  		accountRequestHandler: defaultAccountRequestHandler,
   154  		trieRequestHandler:    defaultTrieRequestHandler,
   155  		storageRequestHandler: defaultStorageRequestHandler,
   156  		codeRequestHandler:    defaultCodeRequestHandler,
   157  		term:                  term,
   158  	}
   159  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   160  	//peer.logger.SetHandler(stderrHandler)
   161  	return peer
   162  }
   163  
   164  func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
   165  	t.storageTries = make(map[common.Hash]*trie.Trie)
   166  	for root, trie := range tries {
   167  		t.storageTries[root] = trie.Copy()
   168  	}
   169  }
   170  
   171  func (t *testPeer) ID() string      { return t.id }
   172  func (t *testPeer) Log() log.Logger { return t.logger }
   173  
   174  func (t *testPeer) Stats() string {
   175  	return fmt.Sprintf(`Account requests: %d
   176  Storage requests: %d
   177  Bytecode requests: %d
   178  Trienode requests: %d
   179  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   180  }
   181  
   182  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   183  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   184  	t.nAccountRequests++
   185  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   186  	return nil
   187  }
   188  
   189  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   190  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   191  	t.nTrienodeRequests++
   192  	go t.trieRequestHandler(t, id, root, paths, bytes)
   193  	return nil
   194  }
   195  
   196  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   197  	t.nStorageRequests++
   198  	if len(accounts) == 1 && origin != nil {
   199  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   200  	} else {
   201  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   202  	}
   203  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   204  	return nil
   205  }
   206  
   207  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   208  	t.nBytecodeRequests++
   209  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   210  	go t.codeRequestHandler(t, id, hashes, bytes)
   211  	return nil
   212  }
   213  
   214  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   215  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   216  	// Pass the response
   217  	var nodes [][]byte
   218  	for _, pathset := range paths {
   219  		switch len(pathset) {
   220  		case 1:
   221  			blob, _, err := t.accountTrie.GetNode(pathset[0])
   222  			if err != nil {
   223  				t.logger.Info("Error handling req", "error", err)
   224  				break
   225  			}
   226  			nodes = append(nodes, blob)
   227  		default:
   228  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   229  			for _, path := range pathset[1:] {
   230  				blob, _, err := account.GetNode(path)
   231  				if err != nil {
   232  					t.logger.Info("Error handling req", "error", err)
   233  					break
   234  				}
   235  				nodes = append(nodes, blob)
   236  			}
   237  		}
   238  	}
   239  	t.remote.OnTrieNodes(t, requestId, nodes)
   240  	return nil
   241  }
   242  
   243  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   244  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   245  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   246  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   247  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   248  		t.term()
   249  		return err
   250  	}
   251  	return nil
   252  }
   253  
   254  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   255  	var size uint64
   256  	if limit == (common.Hash{}) {
   257  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   258  	}
   259  	for _, entry := range t.accountValues {
   260  		if size > cap {
   261  			break
   262  		}
   263  		if bytes.Compare(origin[:], entry.k) <= 0 {
   264  			keys = append(keys, common.BytesToHash(entry.k))
   265  			vals = append(vals, entry.v)
   266  			size += uint64(32 + len(entry.v))
   267  		}
   268  		// If we've exceeded the request threshold, abort
   269  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   270  			break
   271  		}
   272  	}
   273  	// Unless we send the entire trie, we need to supply proofs
   274  	// Actually, we need to supply proofs either way! This seems to be an implementation
   275  	// quirk in go-ethereum
   276  	proof := light.NewNodeSet()
   277  	if err := t.accountTrie.Prove(origin[:], proof); err != nil {
   278  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   279  	}
   280  	if len(keys) > 0 {
   281  		lastK := (keys[len(keys)-1])[:]
   282  		if err := t.accountTrie.Prove(lastK, proof); err != nil {
   283  			t.logger.Error("Could not prove last item", "error", err)
   284  		}
   285  	}
   286  	for _, blob := range proof.NodeList() {
   287  		proofs = append(proofs, blob)
   288  	}
   289  	return keys, vals, proofs
   290  }
   291  
   292  // defaultStorageRequestHandler is a well-behaving storage request handler
   293  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   294  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   295  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   296  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   297  		t.term()
   298  	}
   299  	return nil
   300  }
   301  
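        // defaultCodeRequestHandler is a well-behaving handler for bytecode requests.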
   302  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   303  	var bytecodes [][]byte
   304  	for _, h := range hashes {
   305  		bytecodes = append(bytecodes, getCodeByHash(h))
   306  	}
   307  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   308  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   309  		t.term()
   310  	}
   311  	return nil
   312  }
   313  
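        // createStorageRequestResponse assembles the hashes, slots and proofs for a
        // storage range request, capping the response at roughly 'max' bytes.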
   314  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   315  	var size uint64
   316  	for _, account := range accounts {
   317  		// The first account might start from a different origin and end sooner
   318  		var originHash common.Hash
   319  		if len(origin) > 0 {
   320  			originHash = common.BytesToHash(origin)
   321  		}
   322  		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   323  		if len(limit) > 0 {
   324  			limitHash = common.BytesToHash(limit)
   325  		}
   326  		var (
   327  			keys  []common.Hash
   328  			vals  [][]byte
   329  			abort bool
   330  		)
   331  		for _, entry := range t.storageValues[account] {
   332  			if size >= max {
   333  				abort = true
   334  				break
   335  			}
   336  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   337  				continue
   338  			}
   339  			keys = append(keys, common.BytesToHash(entry.k))
   340  			vals = append(vals, entry.v)
   341  			size += uint64(32 + len(entry.v))
   342  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   343  				break
   344  			}
   345  		}
   346  		if len(keys) > 0 {
   347  			hashes = append(hashes, keys)
   348  			slots = append(slots, vals)
   349  		}
   350  		// Generate the Merkle proofs for the first and last storage slot, but
   351  		// only if the response was capped. If the entire storage trie is included
   352  		// in the response, there is no need for any proofs.
   353  		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
   354  			// If we're aborting, we need to prove the first and last item
   355  			// This terminates the response (and thus the loop)
   356  			proof := light.NewNodeSet()
   357  			stTrie := t.storageTries[account]
   358  
   359  			// Here's a potential gotcha: when constructing the proof, we cannot
   360  			// use the 'origin' slice directly, but must use the full 32-byte
   361  			// hash form.
   362  			if err := stTrie.Prove(originHash[:], proof); err != nil {
   363  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   364  			}
   365  			if len(keys) > 0 {
   366  				lastK := (keys[len(keys)-1])[:]
   367  				if err := stTrie.Prove(lastK, proof); err != nil {
   368  					t.logger.Error("Could not prove last item", "error", err)
   369  				}
   370  			}
   371  			for _, blob := range proof.NodeList() {
   372  				proofs = append(proofs, blob)
   373  			}
   374  			break
   375  		}
   376  	}
   377  	return hashes, slots, proofs
   378  }
   379  
   380  // createStorageRequestResponseAlwaysProve tests a corner case, where the peer always
   381  // supplies the proof for the last account, even if it is 'complete'.
   382  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   383  	var size uint64
   384  	max = max * 3 / 4
   385  
   386  	var origin common.Hash
   387  	if len(bOrigin) > 0 {
   388  		origin = common.BytesToHash(bOrigin)
   389  	}
   390  	var exit bool
   391  	for i, account := range accounts {
   392  		var keys []common.Hash
   393  		var vals [][]byte
   394  		for _, entry := range t.storageValues[account] {
   395  			if bytes.Compare(entry.k, origin[:]) < 0 {
   396  				exit = true
   397  			}
   398  			keys = append(keys, common.BytesToHash(entry.k))
   399  			vals = append(vals, entry.v)
   400  			size += uint64(32 + len(entry.v))
   401  			if size > max {
   402  				exit = true
   403  			}
   404  		}
   405  		if i == len(accounts)-1 {
   406  			exit = true
   407  		}
   408  		hashes = append(hashes, keys)
   409  		slots = append(slots, vals)
   410  
   411  		if exit {
   412  			// If we're aborting, we need to prove the first and last item
   413  			// This terminates the response (and thus the loop)
   414  			proof := light.NewNodeSet()
   415  			stTrie := t.storageTries[account]
   416  
   417  			// Here's a potential gotcha: when constructing the proof, we cannot
   418  			// use the 'origin' slice directly, but must use the full 32-byte
   419  			// hash form.
   420  			if err := stTrie.Prove(origin[:], proof); err != nil {
   421  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   422  					"error", err)
   423  			}
   424  			if len(keys) > 0 {
   425  				lastK := (keys[len(keys)-1])[:]
   426  				if err := stTrie.Prove(lastK, proof); err != nil {
   427  					t.logger.Error("Could not prove last item", "error", err)
   428  				}
   429  			}
   430  			for _, blob := range proof.NodeList() {
   431  				proofs = append(proofs, blob)
   432  			}
   433  			break
   434  		}
   435  	}
   436  	return hashes, slots, proofs
   437  }
   438  
   439  // emptyRequestAccountRangeFn rejects AccountRangeRequests by replying with an empty response
   440  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   441  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   442  	return nil
   443  }
   444  
   445  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   446  	return nil
   447  }
   448  
   449  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   450  	t.remote.OnTrieNodes(t, requestId, nil)
   451  	return nil
   452  }
   453  
   454  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   455  	return nil
   456  }
   457  
   458  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   459  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   460  	return nil
   461  }
   462  
   463  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   464  	return nil
   465  }
   466  
   467  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   468  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   469  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   470  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   471  		t.term()
   472  	}
   473  	return nil
   474  }
   475  
   476  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   477  //	var bytecodes [][]byte
   478  //	t.remote.OnByteCodes(t, id, bytecodes)
   479  //	return nil
   480  //}
   481  
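        // corruptCodeRequestHandler replies with the requested code hashes themselves
        // instead of the bytecodes, which the syncer is expected to reject.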
   482  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   483  	var bytecodes [][]byte
   484  	for _, h := range hashes {
   485  		// Send back the hashes
   486  		bytecodes = append(bytecodes, h[:])
   487  	}
   488  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   489  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   490  		// Mimic the real-life handler, which drops a peer on errors
   491  		t.remote.Unregister(t.id)
   492  	}
   493  	return nil
   494  }
   495  
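        // cappedCodeRequestHandler only delivers the first requested bytecode, forcing
        // the syncer to re-request the remainder.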
   496  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   497  	var bytecodes [][]byte
   498  	for _, h := range hashes[:1] {
   499  		bytecodes = append(bytecodes, getCodeByHash(h))
   500  	}
   501  	// Missing bytecode can be retrieved again, no error expected
   502  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   503  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   504  		t.term()
   505  	}
   506  	return nil
   507  }
   508  
   509  // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to a very small size
   510  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   511  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   512  }
   513  
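        // starvingAccountRequestHandler serves account range requests, but caps the
        // response size at 500 bytes.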
   514  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   515  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   516  }
   517  
   518  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   519  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   520  //}
   521  
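        // corruptAccountRequestHandler drops the first proof node from an otherwise
        // valid response, so the delivery fails verification.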
   522  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   523  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   524  	if len(proofs) > 0 {
   525  		proofs = proofs[1:]
   526  	}
   527  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   528  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   529  		// Mimic the real-life handler, which drops a peer on errors
   530  		t.remote.Unregister(t.id)
   531  	}
   532  	return nil
   533  }
   534  
   535  // corruptStorageRequestHandler doesn't provide good proofs
   536  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   537  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   538  	if len(proofs) > 0 {
   539  		proofs = proofs[1:]
   540  	}
   541  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   542  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   543  		// Mimic the real-life handler, which drops a peer on errors
   544  		t.remote.Unregister(t.id)
   545  	}
   546  	return nil
   547  }
   548  
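        // noProofStorageRequestHandler answers storage requests but never attaches
        // any Merkle proofs.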
   549  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   550  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   551  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   552  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   553  		// Mimic the real-life handler, which drops a peer on errors
   554  		t.remote.Unregister(t.id)
   555  	}
   556  	return nil
   557  }
   558  
   559  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   560  // also ship the entire trie inside the proof. If the attack is successful,
   561  // the remote side does not do any follow-up requests
   562  func TestSyncBloatedProof(t *testing.T) {
   563  	t.Parallel()
   564  
   565  	testSyncBloatedProof(t, rawdb.HashScheme)
   566  	testSyncBloatedProof(t, rawdb.PathScheme)
   567  }
   568  
   569  func testSyncBloatedProof(t *testing.T, scheme string) {
   570  	var (
   571  		once   sync.Once
   572  		cancel = make(chan struct{})
   573  		term   = func() {
   574  			once.Do(func() {
   575  				close(cancel)
   576  			})
   577  		}
   578  	)
   579  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   580  	source := newTestPeer("source", t, term)
   581  	source.accountTrie = sourceAccountTrie.Copy()
   582  	source.accountValues = elems
   583  
   584  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   585  		var (
   586  			proofs [][]byte
   587  			keys   []common.Hash
   588  			vals   [][]byte
   589  		)
   590  		// The values
   591  		for _, entry := range t.accountValues {
   592  			if bytes.Compare(entry.k, origin[:]) < 0 {
   593  				continue
   594  			}
   595  			if bytes.Compare(entry.k, limit[:]) > 0 {
   596  				continue
   597  			}
   598  			keys = append(keys, common.BytesToHash(entry.k))
   599  			vals = append(vals, entry.v)
   600  		}
   601  		// The proofs
   602  		proof := light.NewNodeSet()
   603  		if err := t.accountTrie.Prove(origin[:], proof); err != nil {
   604  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   605  		}
   606  		// The bloat: add proof of every single element
   607  		for _, entry := range t.accountValues {
   608  			if err := t.accountTrie.Prove(entry.k, proof); err != nil {
   609  				t.logger.Error("Could not prove item", "error", err)
   610  			}
   611  		}
   612  		// And remove one item from the elements
   613  		if len(keys) > 2 {
   614  			keys = append(keys[:1], keys[2:]...)
   615  			vals = append(vals[:1], vals[2:]...)
   616  		}
   617  		for _, blob := range proof.NodeList() {
   618  			proofs = append(proofs, blob)
   619  		}
   620  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   621  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   622  			t.term()
   623  			// This is actually correct, signal to exit the test successfully
   624  		}
   625  		return nil
   626  	}
   627  	syncer := setupSyncer(nodeScheme, source)
   628  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   629  		t.Fatal("No error returned from incomplete/cancelled sync")
   630  	}
   631  }
   632  
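        // setupSyncer creates a Syncer on top of a fresh in-memory database and
        // registers the given test peers with it.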
   633  func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
   634  	stateDb := rawdb.NewMemoryDatabase()
   635  	syncer := NewSyncer(stateDb, scheme)
   636  	for _, peer := range peers {
   637  		syncer.Register(peer)
   638  		peer.remote = syncer
   639  	}
   640  	return syncer
   641  }
   642  
   643  // TestSync tests a basic sync with one peer
   644  func TestSync(t *testing.T) {
   645  	t.Parallel()
   646  
   647  	testSync(t, rawdb.HashScheme)
   648  	testSync(t, rawdb.PathScheme)
   649  }
   650  
   651  func testSync(t *testing.T, scheme string) {
   652  	var (
   653  		once   sync.Once
   654  		cancel = make(chan struct{})
   655  		term   = func() {
   656  			once.Do(func() {
   657  				close(cancel)
   658  			})
   659  		}
   660  	)
   661  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   662  
   663  	mkSource := func(name string) *testPeer {
   664  		source := newTestPeer(name, t, term)
   665  		source.accountTrie = sourceAccountTrie.Copy()
   666  		source.accountValues = elems
   667  		return source
   668  	}
   669  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   670  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   671  		t.Fatalf("sync failed: %v", err)
   672  	}
   673  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   674  }
   675  
   676  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   677  // panic within the prover
   678  func TestSyncTinyTriePanic(t *testing.T) {
   679  	t.Parallel()
   680  
   681  	testSyncTinyTriePanic(t, rawdb.HashScheme)
   682  	testSyncTinyTriePanic(t, rawdb.PathScheme)
   683  }
   684  
   685  func testSyncTinyTriePanic(t *testing.T, scheme string) {
   686  	var (
   687  		once   sync.Once
   688  		cancel = make(chan struct{})
   689  		term   = func() {
   690  			once.Do(func() {
   691  				close(cancel)
   692  			})
   693  		}
   694  	)
   695  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme)
   696  
   697  	mkSource := func(name string) *testPeer {
   698  		source := newTestPeer(name, t, term)
   699  		source.accountTrie = sourceAccountTrie.Copy()
   700  		source.accountValues = elems
   701  		return source
   702  	}
   703  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   704  	done := checkStall(t, term)
   705  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   706  		t.Fatalf("sync failed: %v", err)
   707  	}
   708  	close(done)
   709  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   710  }
   711  
   712  // TestMultiSync tests a basic sync with multiple peers
   713  func TestMultiSync(t *testing.T) {
   714  	t.Parallel()
   715  
   716  	testMultiSync(t, rawdb.HashScheme)
   717  	testMultiSync(t, rawdb.PathScheme)
   718  }
   719  
   720  func testMultiSync(t *testing.T, scheme string) {
   721  	var (
   722  		once   sync.Once
   723  		cancel = make(chan struct{})
   724  		term   = func() {
   725  			once.Do(func() {
   726  				close(cancel)
   727  			})
   728  		}
   729  	)
   730  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   731  
   732  	mkSource := func(name string) *testPeer {
   733  		source := newTestPeer(name, t, term)
   734  		source.accountTrie = sourceAccountTrie.Copy()
   735  		source.accountValues = elems
   736  		return source
   737  	}
   738  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
   739  	done := checkStall(t, term)
   740  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   741  		t.Fatalf("sync failed: %v", err)
   742  	}
   743  	close(done)
   744  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   745  }
   746  
   747  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   748  func TestSyncWithStorage(t *testing.T) {
   749  	t.Parallel()
   750  
   751  	testSyncWithStorage(t, rawdb.HashScheme)
   752  	testSyncWithStorage(t, rawdb.PathScheme)
   753  }
   754  
   755  func testSyncWithStorage(t *testing.T, scheme string) {
   756  	var (
   757  		once   sync.Once
   758  		cancel = make(chan struct{})
   759  		term   = func() {
   760  			once.Do(func() {
   761  				close(cancel)
   762  			})
   763  		}
   764  	)
   765  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false)
   766  
   767  	mkSource := func(name string) *testPeer {
   768  		source := newTestPeer(name, t, term)
   769  		source.accountTrie = sourceAccountTrie.Copy()
   770  		source.accountValues = elems
   771  		source.setStorageTries(storageTries)
   772  		source.storageValues = storageElems
   773  		return source
   774  	}
   775  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
   776  	done := checkStall(t, term)
   777  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   778  		t.Fatalf("sync failed: %v", err)
   779  	}
   780  	close(done)
   781  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   782  }
   783  
   784  // TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
   785  func TestMultiSyncManyUseless(t *testing.T) {
   786  	t.Parallel()
   787  
   788  	testMultiSyncManyUseless(t, rawdb.HashScheme)
   789  	testMultiSyncManyUseless(t, rawdb.PathScheme)
   790  }
   791  
   792  func testMultiSyncManyUseless(t *testing.T, scheme string) {
   793  	var (
   794  		once   sync.Once
   795  		cancel = make(chan struct{})
   796  		term   = func() {
   797  			once.Do(func() {
   798  				close(cancel)
   799  			})
   800  		}
   801  	)
   802  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
   803  
   804  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   805  		source := newTestPeer(name, t, term)
   806  		source.accountTrie = sourceAccountTrie.Copy()
   807  		source.accountValues = elems
   808  		source.setStorageTries(storageTries)
   809  		source.storageValues = storageElems
   810  
   811  		if !noAccount {
   812  			source.accountRequestHandler = emptyRequestAccountRangeFn
   813  		}
   814  		if !noStorage {
   815  			source.storageRequestHandler = emptyStorageRequestHandler
   816  		}
   817  		if !noTrieNode {
   818  			source.trieRequestHandler = emptyTrieRequestHandler
   819  		}
   820  		return source
   821  	}
   822  
   823  	syncer := setupSyncer(
   824  		nodeScheme,
   825  		mkSource("full", true, true, true),
   826  		mkSource("noAccounts", false, true, true),
   827  		mkSource("noStorage", true, false, true),
   828  		mkSource("noTrie", true, true, false),
   829  	)
   830  	done := checkStall(t, term)
   831  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   832  		t.Fatalf("sync failed: %v", err)
   833  	}
   834  	close(done)
   835  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   836  }
   837  
   838  // TestMultiSyncManyUselessWithLowTimeout contains one good peer and many that don't return anything valuable at all, using a very low request timeout
   839  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   840  	t.Parallel()
   841  
   842  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme)
   843  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme)
   844  }
   845  
   846  func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
   847  	var (
   848  		once   sync.Once
   849  		cancel = make(chan struct{})
   850  		term   = func() {
   851  			once.Do(func() {
   852  				close(cancel)
   853  			})
   854  		}
   855  	)
   856  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
   857  
   858  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   859  		source := newTestPeer(name, t, term)
   860  		source.accountTrie = sourceAccountTrie.Copy()
   861  		source.accountValues = elems
   862  		source.setStorageTries(storageTries)
   863  		source.storageValues = storageElems
   864  
   865  		if !noAccount {
   866  			source.accountRequestHandler = emptyRequestAccountRangeFn
   867  		}
   868  		if !noStorage {
   869  			source.storageRequestHandler = emptyStorageRequestHandler
   870  		}
   871  		if !noTrieNode {
   872  			source.trieRequestHandler = emptyTrieRequestHandler
   873  		}
   874  		return source
   875  	}
   876  
   877  	syncer := setupSyncer(
   878  		nodeScheme,
   879  		mkSource("full", true, true, true),
   880  		mkSource("noAccounts", false, true, true),
   881  		mkSource("noStorage", true, false, true),
   882  		mkSource("noTrie", true, true, false),
   883  	)
   884  	// We're setting the timeout to very low, to increase the chance of the timeout
   885  	// being triggered. This was previously a cause of panic, when a response
   886  	// arrived simultaneously as a timeout was triggered.
   887  	syncer.rates.OverrideTTLLimit = time.Millisecond
   888  
   889  	done := checkStall(t, term)
   890  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   891  		t.Fatalf("sync failed: %v", err)
   892  	}
   893  	close(done)
   894  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   895  }
   896  
   897  // TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
   898  func TestMultiSyncManyUnresponsive(t *testing.T) {
   899  	t.Parallel()
   900  
   901  	testMultiSyncManyUnresponsive(t, rawdb.HashScheme)
   902  	testMultiSyncManyUnresponsive(t, rawdb.PathScheme)
   903  }
   904  
   905  func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
   906  	var (
   907  		once   sync.Once
   908  		cancel = make(chan struct{})
   909  		term   = func() {
   910  			once.Do(func() {
   911  				close(cancel)
   912  			})
   913  		}
   914  	)
   915  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
   916  
   917  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   918  		source := newTestPeer(name, t, term)
   919  		source.accountTrie = sourceAccountTrie.Copy()
   920  		source.accountValues = elems
   921  		source.setStorageTries(storageTries)
   922  		source.storageValues = storageElems
   923  
   924  		if !noAccount {
   925  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   926  		}
   927  		if !noStorage {
   928  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   929  		}
   930  		if !noTrieNode {
   931  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   932  		}
   933  		return source
   934  	}
   935  
   936  	syncer := setupSyncer(
   937  		nodeScheme,
   938  		mkSource("full", true, true, true),
   939  		mkSource("noAccounts", false, true, true),
   940  		mkSource("noStorage", true, false, true),
   941  		mkSource("noTrie", true, true, false),
   942  	)
   943  	// We're setting the timeout to very low, to make the test run a bit faster
   944  	syncer.rates.OverrideTTLLimit = time.Millisecond
   945  
   946  	done := checkStall(t, term)
   947  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   948  		t.Fatalf("sync failed: %v", err)
   949  	}
   950  	close(done)
   951  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   952  }
   953  
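        // checkStall spawns a watchdog that calls term() if the sync hasn't finished
        // within a minute; closing the returned channel disarms it.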
   954  func checkStall(t *testing.T, term func()) chan struct{} {
   955  	testDone := make(chan struct{})
   956  	go func() {
   957  		select {
   958  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   959  			t.Log("Sync stalled")
   960  			term()
   961  		case <-testDone:
   962  			return
   963  		}
   964  	}()
   965  	return testDone
   966  }
   967  
   968  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   969  // account trie has a few boundary elements.
   970  func TestSyncBoundaryAccountTrie(t *testing.T) {
   971  	t.Parallel()
   972  
   973  	testSyncBoundaryAccountTrie(t, rawdb.HashScheme)
   974  	testSyncBoundaryAccountTrie(t, rawdb.PathScheme)
   975  }
   976  
   977  func testSyncBoundaryAccountTrie(t *testing.T, scheme string) {
   978  	var (
   979  		once   sync.Once
   980  		cancel = make(chan struct{})
   981  		term   = func() {
   982  			once.Do(func() {
   983  				close(cancel)
   984  			})
   985  		}
   986  	)
   987  	nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000)
   988  
   989  	mkSource := func(name string) *testPeer {
   990  		source := newTestPeer(name, t, term)
   991  		source.accountTrie = sourceAccountTrie.Copy()
   992  		source.accountValues = elems
   993  		return source
   994  	}
   995  	syncer := setupSyncer(
   996  		nodeScheme,
   997  		mkSource("peer-a"),
   998  		mkSource("peer-b"),
   999  	)
  1000  	done := checkStall(t, term)
  1001  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1002  		t.Fatalf("sync failed: %v", err)
  1003  	}
  1004  	close(done)
  1005  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1006  }
  1007  
  1008  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
  1009  // consistently returning very small results
  1010  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
  1011  	t.Parallel()
  1012  
  1013  	testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1014  	testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1015  }
  1016  
  1017  func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1018  	var (
  1019  		once   sync.Once
  1020  		cancel = make(chan struct{})
  1021  		term   = func() {
  1022  			once.Do(func() {
  1023  				close(cancel)
  1024  			})
  1025  		}
  1026  	)
  1027  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1028  
  1029  	mkSource := func(name string, slow bool) *testPeer {
  1030  		source := newTestPeer(name, t, term)
  1031  		source.accountTrie = sourceAccountTrie.Copy()
  1032  		source.accountValues = elems
  1033  
  1034  		if slow {
  1035  			source.accountRequestHandler = starvingAccountRequestHandler
  1036  		}
  1037  		return source
  1038  	}
  1039  
  1040  	syncer := setupSyncer(
  1041  		nodeScheme,
  1042  		mkSource("nice-a", false),
  1043  		mkSource("nice-b", false),
  1044  		mkSource("nice-c", false),
  1045  		mkSource("capped", true),
  1046  	)
  1047  	done := checkStall(t, term)
  1048  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1049  		t.Fatalf("sync failed: %v", err)
  1050  	}
  1051  	close(done)
  1052  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1053  }
  1054  
  1055  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which responds to code
  1056  // requests with corrupt bytecode.
  1057  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
  1058  	t.Parallel()
  1059  
  1060  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme)
  1061  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme)
  1062  }
  1063  
  1064  func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) {
  1065  	var (
  1066  		once   sync.Once
  1067  		cancel = make(chan struct{})
  1068  		term   = func() {
  1069  			once.Do(func() {
  1070  				close(cancel)
  1071  			})
  1072  		}
  1073  	)
  1074  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1075  
  1076  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1077  		source := newTestPeer(name, t, term)
  1078  		source.accountTrie = sourceAccountTrie.Copy()
  1079  		source.accountValues = elems
  1080  		source.codeRequestHandler = codeFn
  1081  		return source
  1082  	}
  1083  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1084  	// chance that the full set of codes requested are sent only to the
  1085  	// non-corrupt peer, which delivers everything in one go, and makes the
  1086  	// test moot
  1087  	syncer := setupSyncer(
  1088  		nodeScheme,
  1089  		mkSource("capped", cappedCodeRequestHandler),
  1090  		mkSource("corrupt", corruptCodeRequestHandler),
  1091  	)
  1092  	done := checkStall(t, term)
  1093  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1094  		t.Fatalf("sync failed: %v", err)
  1095  	}
  1096  	close(done)
  1097  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1098  }
  1099  
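        // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which doesn't deliver
        // account range proofs properly.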
  1100  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1101  	t.Parallel()
  1102  
  1103  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme)
  1104  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme)
  1105  }
  1106  
  1107  func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
  1108  	var (
  1109  		once   sync.Once
  1110  		cancel = make(chan struct{})
  1111  		term   = func() {
  1112  			once.Do(func() {
  1113  				close(cancel)
  1114  			})
  1115  		}
  1116  	)
  1117  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1118  
  1119  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1120  		source := newTestPeer(name, t, term)
  1121  		source.accountTrie = sourceAccountTrie.Copy()
  1122  		source.accountValues = elems
  1123  		source.accountRequestHandler = accFn
  1124  		return source
  1125  	}
  1126  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1127  	// chance that the full set of codes requested are sent only to the
  1128  	// non-corrupt peer, which delivers everything in one go, and makes the
  1129  	// test moot
  1130  	syncer := setupSyncer(
  1131  		nodeScheme,
  1132  		mkSource("capped", defaultAccountRequestHandler),
  1133  		mkSource("corrupt", corruptAccountRequestHandler),
  1134  	)
  1135  	done := checkStall(t, term)
  1136  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1137  		t.Fatalf("sync failed: %v", err)
  1138  	}
  1139  	close(done)
  1140  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1141  }
  1142  
  1143  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
  1144  // one by one
  1145  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1146  	t.Parallel()
  1147  
  1148  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme)
  1149  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme)
  1150  }
  1151  
  1152  func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
  1153  	var (
  1154  		once   sync.Once
  1155  		cancel = make(chan struct{})
  1156  		term   = func() {
  1157  			once.Do(func() {
  1158  				close(cancel)
  1159  			})
  1160  		}
  1161  	)
  1162  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1163  
  1164  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1165  		source := newTestPeer(name, t, term)
  1166  		source.accountTrie = sourceAccountTrie.Copy()
  1167  		source.accountValues = elems
  1168  		source.codeRequestHandler = codeFn
  1169  		return source
  1170  	}
  1171  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1172  	// so it shouldn't be more than that
  1173  	var counter int
  1174  	syncer := setupSyncer(
  1175  		nodeScheme,
  1176  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1177  			counter++
  1178  			return cappedCodeRequestHandler(t, id, hashes, max)
  1179  		}),
  1180  	)
  1181  	done := checkStall(t, term)
  1182  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1183  		t.Fatalf("sync failed: %v", err)
  1184  	}
  1185  	close(done)
  1186  
  1187  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1188  	// deduplication is per request batch. If it were a perfect global dedup,
  1189  	// we would expect only 8 requests. If there were no dedup, there would be
  1190  	// 3k requests.
  1191  	// We expect somewhere below 100 requests for these 8 unique hashes. But
  1192  	// the number can be flaky, so don't limit it so strictly.
  1193  	if threshold := 100; counter > threshold {
  1194  		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
  1195  	}
  1196  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1197  }
  1198  
  1199  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1200  // storage trie has a few boundary elements.
  1201  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1202  	t.Parallel()
  1203  
  1204  	testSyncBoundaryStorageTrie(t, rawdb.HashScheme)
  1205  	testSyncBoundaryStorageTrie(t, rawdb.PathScheme)
  1206  }
  1207  
  1208  func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
  1209  	var (
  1210  		once   sync.Once
  1211  		cancel = make(chan struct{})
  1212  		term   = func() {
  1213  			once.Do(func() {
  1214  				close(cancel)
  1215  			})
  1216  		}
  1217  	)
  1218  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true)
  1219  
  1220  	mkSource := func(name string) *testPeer {
  1221  		source := newTestPeer(name, t, term)
  1222  		source.accountTrie = sourceAccountTrie.Copy()
  1223  		source.accountValues = elems
  1224  		source.setStorageTries(storageTries)
  1225  		source.storageValues = storageElems
  1226  		return source
  1227  	}
  1228  	syncer := setupSyncer(
  1229  		nodeScheme,
  1230  		mkSource("peer-a"),
  1231  		mkSource("peer-b"),
  1232  	)
  1233  	done := checkStall(t, term)
  1234  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1235  		t.Fatalf("sync failed: %v", err)
  1236  	}
  1237  	close(done)
  1238  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1239  }
  1240  
  1241  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1242  // consistently returning very small results
  1243  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1244  	t.Parallel()
  1245  
  1246  	testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1247  	testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1248  }
  1249  
  1250  func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1251  	var (
  1252  		once   sync.Once
  1253  		cancel = make(chan struct{})
  1254  		term   = func() {
  1255  			once.Do(func() {
  1256  				close(cancel)
  1257  			})
  1258  		}
  1259  	)
  1260  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false)
  1261  
  1262  	mkSource := func(name string, slow bool) *testPeer {
  1263  		source := newTestPeer(name, t, term)
  1264  		source.accountTrie = sourceAccountTrie.Copy()
  1265  		source.accountValues = elems
  1266  		source.setStorageTries(storageTries)
  1267  		source.storageValues = storageElems
  1268  
  1269  		if slow {
  1270  			source.storageRequestHandler = starvingStorageRequestHandler
  1271  		}
  1272  		return source
  1273  	}
  1274  
  1275  	syncer := setupSyncer(
  1276  		nodeScheme,
  1277  		mkSource("nice-a", false),
  1278  		mkSource("slow", true),
  1279  	)
  1280  	done := checkStall(t, term)
  1281  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1282  		t.Fatalf("sync failed: %v", err)
  1283  	}
  1284  	close(done)
  1285  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1286  }
  1287  
  1288  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1289  // sometimes sending bad proofs
  1290  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1291  	t.Parallel()
  1292  
  1293  	testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme)
  1294  	testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme)
  1295  }
  1296  
  1297  func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
  1298  	var (
  1299  		once   sync.Once
  1300  		cancel = make(chan struct{})
  1301  		term   = func() {
  1302  			once.Do(func() {
  1303  				close(cancel)
  1304  			})
  1305  		}
  1306  	)
  1307  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
  1308  
  1309  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1310  		source := newTestPeer(name, t, term)
  1311  		source.accountTrie = sourceAccountTrie.Copy()
  1312  		source.accountValues = elems
  1313  		source.setStorageTries(storageTries)
  1314  		source.storageValues = storageElems
  1315  		source.storageRequestHandler = handler
  1316  		return source
  1317  	}
  1318  
  1319  	syncer := setupSyncer(
  1320  		nodeScheme,
  1321  		mkSource("nice-a", defaultStorageRequestHandler),
  1322  		mkSource("nice-b", defaultStorageRequestHandler),
  1323  		mkSource("nice-c", defaultStorageRequestHandler),
  1324  		mkSource("corrupt", corruptStorageRequestHandler),
  1325  	)
  1326  	done := checkStall(t, term)
  1327  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1328  		t.Fatalf("sync failed: %v", err)
  1329  	}
  1330  	close(done)
  1331  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1332  }
  1333  
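        // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
        // where one peer never supplies storage proofs.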
  1334  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1335  	t.Parallel()
  1336  
  1337  	testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme)
  1338  	testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme)
  1339  }
  1340  
  1341  func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
  1342  	var (
  1343  		once   sync.Once
  1344  		cancel = make(chan struct{})
  1345  		term   = func() {
  1346  			once.Do(func() {
  1347  				close(cancel)
  1348  			})
  1349  		}
  1350  	)
  1351  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
  1352  
  1353  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1354  		source := newTestPeer(name, t, term)
  1355  		source.accountTrie = sourceAccountTrie.Copy()
  1356  		source.accountValues = elems
  1357  		source.setStorageTries(storageTries)
  1358  		source.storageValues = storageElems
  1359  		source.storageRequestHandler = handler
  1360  		return source
  1361  	}
  1362  	syncer := setupSyncer(
  1363  		nodeScheme,
  1364  		mkSource("nice-a", defaultStorageRequestHandler),
  1365  		mkSource("nice-b", defaultStorageRequestHandler),
  1366  		mkSource("nice-c", defaultStorageRequestHandler),
  1367  		mkSource("corrupt", noProofStorageRequestHandler),
  1368  	)
  1369  	done := checkStall(t, term)
  1370  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1371  		t.Fatalf("sync failed: %v", err)
  1372  	}
  1373  	close(done)
  1374  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1375  }
  1376  
  1377  // TestSyncWithStorageMisbehavingProve tests a basic sync using accounts + storage + code, against
  1378  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1379  // an error, where the recipient erroneously clipped the boundary nodes, but
  1380  // did not mark the account for healing.
  1381  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1382  	t.Parallel()
  1383  
  1384  	testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme)
  1385  	testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme)
  1386  }
  1387  
  1388  func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
  1389  	var (
  1390  		once   sync.Once
  1391  		cancel = make(chan struct{})
  1392  		term   = func() {
  1393  			once.Do(func() {
  1394  				close(cancel)
  1395  			})
  1396  		}
  1397  	)
  1398  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false)
  1399  
  1400  	mkSource := func(name string) *testPeer {
  1401  		source := newTestPeer(name, t, term)
  1402  		source.accountTrie = sourceAccountTrie.Copy()
  1403  		source.accountValues = elems
  1404  		source.setStorageTries(storageTries)
  1405  		source.storageValues = storageElems
  1406  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1407  		return source
  1408  	}
  1409  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
  1410  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1411  		t.Fatalf("sync failed: %v", err)
  1412  	}
  1413  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1414  }
  1415  
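        // kv is a raw key-value trie entry, sortable by key.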
  1416  type kv struct {
  1417  	k, v []byte
  1418  }
  1419  
  1420  func (k *kv) cmp(other *kv) int {
  1421  	return bytes.Compare(k.k, other.k)
  1422  }
  1423  
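        // key32 returns a 32-byte key whose first eight bytes hold the little-endian
        // encoding of i.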
  1424  func key32(i uint64) []byte {
  1425  	key := make([]byte, 32)
  1426  	binary.LittleEndian.PutUint64(key, i)
  1427  	return key
  1428  }
  1429  
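        // codehashes is a small fixed pool of code hashes used to assign
        // pseudo-random bytecode to the test accounts.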
  1430  var (
  1431  	codehashes = []common.Hash{
  1432  		crypto.Keccak256Hash([]byte{0}),
  1433  		crypto.Keccak256Hash([]byte{1}),
  1434  		crypto.Keccak256Hash([]byte{2}),
  1435  		crypto.Keccak256Hash([]byte{3}),
  1436  		crypto.Keccak256Hash([]byte{4}),
  1437  		crypto.Keccak256Hash([]byte{5}),
  1438  		crypto.Keccak256Hash([]byte{6}),
  1439  		crypto.Keccak256Hash([]byte{7}),
  1440  	}
  1441  )
  1442  
  1443  // getCodeHash returns a pseudo-random code hash
  1444  func getCodeHash(i uint64) []byte {
  1445  	h := codehashes[int(i)%len(codehashes)]
  1446  	return common.CopyBytes(h[:])
  1447  }
  1448  
  1449  // getCodeByHash is a convenience function to look up the code from the code hash.
  1450  func getCodeByHash(hash common.Hash) []byte {
  1451  	if hash == types.EmptyCodeHash {
  1452  		return nil
  1453  	}
  1454  	for i, h := range codehashes {
  1455  		if h == hash {
  1456  			return []byte{byte(i)}
  1457  		}
  1458  	}
  1459  	return nil
  1460  }
  1461  
  1462  // makeAccountTrieNoStorage spits out an account trie, along with the sorted leaves.
  1463  func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) {
  1464  	var (
  1465  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1466  		accTrie = trie.NewEmpty(db)
  1467  		entries []*kv
  1468  	)
  1469  	for i := uint64(1); i <= uint64(n); i++ {
  1470  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1471  			Nonce:    i,
  1472  			Balance:  big.NewInt(int64(i)),
  1473  			Root:     types.EmptyRootHash,
  1474  			CodeHash: getCodeHash(i),
  1475  		})
  1476  		key := key32(i)
  1477  		elem := &kv{key, value}
  1478  		accTrie.MustUpdate(elem.k, elem.v)
  1479  		entries = append(entries, elem)
  1480  	}
  1481  	slices.SortFunc(entries, (*kv).cmp)
  1482  
  1483  	// Commit the state changes into db and re-create the trie
  1484  	// for accessing later.
  1485  	root, nodes, _ := accTrie.Commit(false)
  1486  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
  1487  
  1488  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1489  	return db.Scheme(), accTrie, entries
  1490  }
  1491  
  1492  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1493  // accounts normally, this function will fill a few accounts which have
  1494  // boundary hashes.
  1495  func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
  1496  	var (
  1497  		entries    []*kv
  1498  		boundaries []common.Hash
  1499  
  1500  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1501  		accTrie = trie.NewEmpty(db)
  1502  	)
  1503  	// Initialize boundaries
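        	// The 256-bit hash space is split into accountConcurrency evenly sized
        	// chunks (mirroring how the syncer partitions account ranges); each
        	// boundary is the last hash of its chunk.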
  1504  	var next common.Hash
  1505  	step := new(big.Int).Sub(
  1506  		new(big.Int).Div(
  1507  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1508  			big.NewInt(int64(accountConcurrency)),
  1509  		), common.Big1,
  1510  	)
  1511  	for i := 0; i < accountConcurrency; i++ {
  1512  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1513  		if i == accountConcurrency-1 {
  1514  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1515  		}
  1516  		boundaries = append(boundaries, last)
  1517  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1518  	}
  1519  	// Fill boundary accounts
  1520  	for i := 0; i < len(boundaries); i++ {
  1521  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1522  			Nonce:    uint64(0),
  1523  			Balance:  big.NewInt(int64(i)),
  1524  			Root:     types.EmptyRootHash,
  1525  			CodeHash: getCodeHash(uint64(i)),
  1526  		})
  1527  		elem := &kv{boundaries[i].Bytes(), value}
  1528  		accTrie.MustUpdate(elem.k, elem.v)
  1529  		entries = append(entries, elem)
  1530  	}
  1531  	// Fill other accounts if required
  1532  	for i := uint64(1); i <= uint64(n); i++ {
  1533  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1534  			Nonce:    i,
  1535  			Balance:  big.NewInt(int64(i)),
  1536  			Root:     types.EmptyRootHash,
  1537  			CodeHash: getCodeHash(i),
  1538  		})
  1539  		elem := &kv{key32(i), value}
  1540  		accTrie.MustUpdate(elem.k, elem.v)
  1541  		entries = append(entries, elem)
  1542  	}
  1543  	slices.SortFunc(entries, (*kv).cmp)
  1544  
  1545  	// Commit the state changes into db and re-create the trie
  1546  	// for accessing later.
  1547  	root, nodes, _ := accTrie.Commit(false)
  1548  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
  1549  
  1550  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1551  	return db.Scheme(), accTrie, entries
  1552  }
  1553  
  1554  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1555  // has a unique storage set.
  1556  func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1557  	var (
  1558  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1559  		accTrie        = trie.NewEmpty(db)
  1560  		entries        []*kv
  1561  		storageRoots   = make(map[common.Hash]common.Hash)
  1562  		storageTries   = make(map[common.Hash]*trie.Trie)
  1563  		storageEntries = make(map[common.Hash][]*kv)
  1564  		nodes          = trienode.NewMergedNodeSet()
  1565  	)
  1566  	// Create n accounts in the trie
  1567  	for i := uint64(1); i <= uint64(accounts); i++ {
  1568  		key := key32(i)
  1569  		codehash := types.EmptyCodeHash.Bytes()
  1570  		if code {
  1571  			codehash = getCodeHash(i)
  1572  		}
  1573  		// Create a storage trie
  1574  		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
  1575  		nodes.Merge(stNodes)
  1576  
  1577  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1578  			Nonce:    i,
  1579  			Balance:  big.NewInt(int64(i)),
  1580  			Root:     stRoot,
  1581  			CodeHash: codehash,
  1582  		})
  1583  		elem := &kv{key, value}
  1584  		accTrie.MustUpdate(elem.k, elem.v)
  1585  		entries = append(entries, elem)
  1586  
  1587  		storageRoots[common.BytesToHash(key)] = stRoot
  1588  		storageEntries[common.BytesToHash(key)] = stEntries
  1589  	}
  1590  	slices.SortFunc(entries, (*kv).cmp)
  1591  
  1592  	// Commit account trie
  1593  	root, set, _ := accTrie.Commit(true)
  1594  	nodes.Merge(set)
  1595  
  1596  	// Commit gathered dirty nodes into database
  1597  	db.Update(root, types.EmptyRootHash, 0, nodes, nil)
  1598  
  1599  	// Re-create tries with new root
  1600  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1601  	for i := uint64(1); i <= uint64(accounts); i++ {
  1602  		key := key32(i)
  1603  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1604  		trie, _ := trie.New(id, db)
  1605  		storageTries[common.BytesToHash(key)] = trie
  1606  	}
  1607  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1608  }
  1609  
  1610  // makeAccountTrieWithStorage spits out an account trie with storage tries, along with the sorted leaves.
  1611  func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1612  	var (
  1613  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1614  		accTrie        = trie.NewEmpty(db)
  1615  		entries        []*kv
  1616  		storageRoots   = make(map[common.Hash]common.Hash)
  1617  		storageTries   = make(map[common.Hash]*trie.Trie)
  1618  		storageEntries = make(map[common.Hash][]*kv)
  1619  		nodes          = trienode.NewMergedNodeSet()
  1620  	)
  1621  	// Create n accounts in the trie
  1622  	for i := uint64(1); i <= uint64(accounts); i++ {
  1623  		key := key32(i)
  1624  		codehash := types.EmptyCodeHash.Bytes()
  1625  		if code {
  1626  			codehash = getCodeHash(i)
  1627  		}
  1628  		// Make a storage trie
  1629  		var (
  1630  			stRoot    common.Hash
  1631  			stNodes   *trienode.NodeSet
  1632  			stEntries []*kv
  1633  		)
  1634  		if boundary {
  1635  			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
  1636  		} else {
  1637  			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
  1638  		}
  1639  		nodes.Merge(stNodes)
  1640  
  1641  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1642  			Nonce:    i,
  1643  			Balance:  big.NewInt(int64(i)),
  1644  			Root:     stRoot,
  1645  			CodeHash: codehash,
  1646  		})
  1647  		elem := &kv{key, value}
  1648  		accTrie.MustUpdate(elem.k, elem.v)
  1649  		entries = append(entries, elem)
  1650  
  1651  		// We reuse the same storage contents (and hence the same storage root) for all accounts.
  1652  		storageRoots[common.BytesToHash(key)] = stRoot
  1653  		storageEntries[common.BytesToHash(key)] = stEntries
  1654  	}
  1655  	slices.SortFunc(entries, (*kv).cmp)
  1656  
  1657  	// Commit account trie
  1658  	root, set, _ := accTrie.Commit(true)
  1659  	nodes.Merge(set)
  1660  
  1661  	// Commit gathered dirty nodes into database
  1662  	db.Update(root, types.EmptyRootHash, 0, nodes, nil)
  1663  
  1664  	// Re-create tries with new root
  1665  	accTrie, err := trie.New(trie.StateTrieID(root), db)
  1666  	if err != nil {
  1667  		panic(err)
  1668  	}
  1669  	for i := uint64(1); i <= uint64(accounts); i++ {
  1670  		key := key32(i)
  1671  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1672  		trie, err := trie.New(id, db)
  1673  		if err != nil {
  1674  			panic(err)
  1675  		}
  1676  		storageTries[common.BytesToHash(key)] = trie
  1677  	}
  1678  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1679  }
  1680  
  1681  // makeStorageTrieWithSeed fills a storage trie with n items, returning the
  1682  // storage root, the dirty trie nodes and the sorted entries. The seed can be
  1683  // used to ensure that tries are unique.
  1684  func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1685  	trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1686  	var entries []*kv
  1687  	for i := uint64(1); i <= n; i++ {
  1688  		// store value 'i+seed' at slot 'i'
  1689  		slotValue := key32(i + seed)
  1690  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1691  
  1692  		slotKey := key32(i)
  1693  		key := crypto.Keccak256Hash(slotKey[:])
  1694  
  1695  		elem := &kv{key[:], rlpSlotValue}
  1696  		trie.MustUpdate(elem.k, elem.v)
  1697  		entries = append(entries, elem)
  1698  	}
  1699  	slices.SortFunc(entries, (*kv).cmp)
  1700  	root, nodes, _ := trie.Commit(false)
  1701  	return root, nodes, entries
  1702  }
  1703  
  1704  // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
  1705  // storage slots normally, this function will fill a few slots which have
  1706  // boundary hashes.
  1707  func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1708  	var (
  1709  		entries    []*kv
  1710  		boundaries []common.Hash
  1711  		trie, _    = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1712  	)
  1713  	// Initialize boundaries
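        	// Same chunking as in makeBoundaryAccountTrie: split the hash space into
        	// accountConcurrency chunks and remember the last hash of each.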
  1714  	var next common.Hash
  1715  	step := new(big.Int).Sub(
  1716  		new(big.Int).Div(
  1717  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1718  			big.NewInt(int64(accountConcurrency)),
  1719  		), common.Big1,
  1720  	)
  1721  	for i := 0; i < accountConcurrency; i++ {
  1722  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1723  		if i == accountConcurrency-1 {
  1724  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1725  		}
  1726  		boundaries = append(boundaries, last)
  1727  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1728  	}
  1729  	// Fill boundary slots
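        	// The boundary hash is used directly as the (already hashed) slot key here,
        	// unlike the regular slots below which are keyed by keccak256 of the slot key.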
  1730  	for i := 0; i < len(boundaries); i++ {
  1731  		key := boundaries[i]
  1732  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1733  
  1734  		elem := &kv{key[:], val}
  1735  		trie.MustUpdate(elem.k, elem.v)
  1736  		entries = append(entries, elem)
  1737  	}
  1738  	// Fill other slots if required
  1739  	for i := uint64(1); i <= uint64(n); i++ {
  1740  		slotKey := key32(i)
  1741  		key := crypto.Keccak256Hash(slotKey[:])
  1742  
  1743  		slotValue := key32(i)
  1744  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1745  
  1746  		elem := &kv{key[:], rlpSlotValue}
  1747  		trie.MustUpdate(elem.k, elem.v)
  1748  		entries = append(entries, elem)
  1749  	}
  1750  	slices.SortFunc(entries, (*kv).cmp)
  1751  	root, nodes, _ := trie.Commit(false)
  1752  	return root, nodes, entries
  1753  }
  1754  
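        // verifyTrie re-opens the state under the given root and iterates every
        // account and storage slot, failing the test if anything cannot be resolved
        // and logging the totals.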
  1755  func verifyTrie(scheme string, db zonddb.KeyValueStore, root common.Hash, t *testing.T) {
  1756  	t.Helper()
  1757  	triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
  1758  	accTrie, err := trie.New(trie.StateTrieID(root), triedb)
  1759  	if err != nil {
  1760  		t.Fatal(err)
  1761  	}
  1762  	accounts, slots := 0, 0
  1763  	accIt := trie.NewIterator(accTrie.MustNodeIterator(nil))
  1764  	for accIt.Next() {
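        		// Each leaf is an RLP-encoded account; decode it into a local struct
        		// mirroring the field layout of types.StateAccount.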
  1765  		var acc struct {
  1766  			Nonce    uint64
  1767  			Balance  *big.Int
  1768  			Root     common.Hash
  1769  			CodeHash []byte
  1770  		}
  1771  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1772  			log.Crit("Invalid account encountered during snapshot creation", "err", err)
  1773  		}
  1774  		accounts++
  1775  		if acc.Root != types.EmptyRootHash {
  1776  			id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
  1777  			storeTrie, err := trie.NewStateTrie(id, triedb)
  1778  			if err != nil {
  1779  				t.Fatal(err)
  1780  			}
  1781  			storeIt := trie.NewIterator(storeTrie.MustNodeIterator(nil))
  1782  			for storeIt.Next() {
  1783  				slots++
  1784  			}
  1785  			if err := storeIt.Err; err != nil {
  1786  				t.Fatal(err)
  1787  			}
  1788  		}
  1789  	}
  1790  	if err := accIt.Err; err != nil {
  1791  		t.Fatal(err)
  1792  	}
  1793  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1794  }
  1795  
  1796  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1797  // minimizing state healing.
  1798  func TestSyncAccountPerformance(t *testing.T) {
  1799  	t.Parallel()
  1800  
  1801  	testSyncAccountPerformance(t, rawdb.HashScheme)
  1802  	testSyncAccountPerformance(t, rawdb.PathScheme)
  1803  }
  1804  
  1805  func testSyncAccountPerformance(t *testing.T, scheme string) {
  1806  	// Set the account concurrency to 1. This _should_ result in the
  1807  	// range root becoming correct, and there should be no healing needed.
  1808  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1809  	accountConcurrency = 1
  1810  
  1811  	var (
  1812  		once   sync.Once
  1813  		cancel = make(chan struct{})
  1814  		term   = func() {
  1815  			once.Do(func() {
  1816  				close(cancel)
  1817  			})
  1818  		}
  1819  	)
  1820  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
  1821  
  1822  	mkSource := func(name string) *testPeer {
  1823  		source := newTestPeer(name, t, term)
  1824  		source.accountTrie = sourceAccountTrie.Copy()
  1825  		source.accountValues = elems
  1826  		return source
  1827  	}
  1828  	src := mkSource("source")
  1829  	syncer := setupSyncer(nodeScheme, src)
  1830  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1831  		t.Fatalf("sync failed: %v", err)
  1832  	}
  1833  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1834  	// The trie root will always be requested, since it is added when the snap
  1835  	// sync cycle starts. When popping the queue, we do not look it up again.
  1836  	// Doing so would bring this number down to zero in this artificial testcase,
  1837  	// but only add extra IO for no reason in practice.
  1838  	if have, want := src.nTrienodeRequests, 1; have != want {
  1839  		fmt.Print(src.Stats())
  1840  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1841  	}
  1842  }
  1843  
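        // TestSlotEstimation exercises estimateRemainingSlots with hand-picked scan
        // boundaries. The expected values are consistent with an estimate of roughly
        // count * (unscanned space) / (scanned space) over the 256-bit hash range;
        // the degenerate inputs (nothing or almost nothing scanned) are expected to
        // error out and yield zero.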
  1844  func TestSlotEstimation(t *testing.T) {
  1845  	for i, tc := range []struct {
  1846  		last  common.Hash
  1847  		count int
  1848  		want  uint64
  1849  	}{
  1850  		{
  1851  			// Half the space
  1852  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1853  			100,
  1854  			100,
  1855  		},
  1856  		{
  1857  			// 1 / 16th
  1858  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1859  			100,
  1860  			1500,
  1861  		},
  1862  		{
  1863  			// Bit more than 1 / 16th
  1864  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1865  			100,
  1866  			1499,
  1867  		},
  1868  		{
  1869  			// Almost everything
  1870  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1871  			100,
  1872  			6,
  1873  		},
  1874  		{
  1875  			// Almost nothing -- should lead to error
  1876  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1877  			1,
  1878  			0,
  1879  		},
  1880  		{
  1881  			// Nothing -- should lead to error
  1882  			common.Hash{},
  1883  			100,
  1884  			0,
  1885  		},
  1886  	} {
  1887  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1888  		if want := tc.want; have != want {
  1889  			t.Errorf("test %d: have %d want %d", i, have, want)
  1890  		}
  1891  	}
  1892  }
  1893  
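        // newDbConfig returns the trie database configuration matching the requested
        // state scheme: the default hash-based config for rawdb.HashScheme, and a
        // path-based config using pathdb.Defaults otherwise.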
  1894  func newDbConfig(scheme string) *trie.Config {
  1895  	if scheme == rawdb.HashScheme {
  1896  		return &trie.Config{}
  1897  	}
  1898  	return &trie.Config{PathDB: pathdb.Defaults}
  1899  }