github.com/theQRL/go-zond@v0.2.1/zond/protocols/snap/sync_test.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"slices"
    27  	"sync"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/theQRL/go-zond/common"
    32  	"github.com/theQRL/go-zond/core/rawdb"
    33  	"github.com/theQRL/go-zond/core/types"
    34  	"github.com/theQRL/go-zond/crypto"
    35  	"github.com/theQRL/go-zond/log"
    36  	"github.com/theQRL/go-zond/rlp"
    37  	"github.com/theQRL/go-zond/trie"
    38  	"github.com/theQRL/go-zond/trie/testutil"
    39  	"github.com/theQRL/go-zond/trie/triedb/pathdb"
    40  	"github.com/theQRL/go-zond/trie/trienode"
    41  	"github.com/theQRL/go-zond/zonddb"
    42  	"golang.org/x/crypto/sha3"
    43  )
    44  
    45  func TestHashing(t *testing.T) {
    46  	t.Parallel()
    47  
    48  	var bytecodes = make([][]byte, 10)
    49  	for i := 0; i < len(bytecodes); i++ {
    50  		buf := make([]byte, 100)
    51  		rand.Read(buf)
    52  		bytecodes[i] = buf
    53  	}
    54  	var want, got string
    55  	var old = func() {
    56  		hasher := sha3.NewLegacyKeccak256()
    57  		for i := 0; i < len(bytecodes); i++ {
    58  			hasher.Reset()
    59  			hasher.Write(bytecodes[i])
    60  			hash := hasher.Sum(nil)
    61  			got = fmt.Sprintf("%v\n%v", got, hash)
    62  		}
    63  	}
    64  	var new = func() {
    65  		hasher := crypto.NewKeccakState()
    66  		var hash = make([]byte, 32)
    67  		for i := 0; i < len(bytecodes); i++ {
    68  			hasher.Reset()
    69  			hasher.Write(bytecodes[i])
    70  			hasher.Read(hash)
    71  			want = fmt.Sprintf("%v\n%v", want, hash)
    72  		}
    73  	}
    74  	old()
    75  	new()
    76  	if want != got {
    77  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    78  	}
    79  }
    80  
    81  func BenchmarkHashing(b *testing.B) {
    82  	var bytecodes = make([][]byte, 10000)
    83  	for i := 0; i < len(bytecodes); i++ {
    84  		buf := make([]byte, 100)
    85  		rand.Read(buf)
    86  		bytecodes[i] = buf
    87  	}
    88  	var old = func() {
    89  		hasher := sha3.NewLegacyKeccak256()
    90  		for i := 0; i < len(bytecodes); i++ {
    91  			hasher.Reset()
    92  			hasher.Write(bytecodes[i])
    93  			hasher.Sum(nil)
    94  		}
    95  	}
    96  	var new = func() {
    97  		hasher := crypto.NewKeccakState()
    98  		var hash = make([]byte, 32)
    99  		for i := 0; i < len(bytecodes); i++ {
   100  			hasher.Reset()
   101  			hasher.Write(bytecodes[i])
   102  			hasher.Read(hash)
   103  		}
   104  	}
   105  	b.Run("old", func(b *testing.B) {
   106  		b.ReportAllocs()
   107  		for i := 0; i < b.N; i++ {
   108  			old()
   109  		}
   110  	})
   111  	b.Run("new", func(b *testing.B) {
   112  		b.ReportAllocs()
   113  		for i := 0; i < b.N; i++ {
   114  			new()
   115  		}
   116  	})
   117  }
   118  
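         // Handler function types used by testPeer: tests install these to script how the
         // peer answers account, storage, trie-node and bytecode requests.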
   119  type (
   120  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   121  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   122  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   123  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   124  )
   125  
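         // testPeer is a mock snap peer backed by in-memory account and storage tries.
         // Its request handlers can be swapped out to simulate slow, empty or corrupt peers.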
   126  type testPeer struct {
   127  	id            string
   128  	test          *testing.T
   129  	remote        *Syncer
   130  	logger        log.Logger
   131  	accountTrie   *trie.Trie
   132  	accountValues []*kv
   133  	storageTries  map[common.Hash]*trie.Trie
   134  	storageValues map[common.Hash][]*kv
   135  
   136  	accountRequestHandler accountHandlerFunc
   137  	storageRequestHandler storageHandlerFunc
   138  	trieRequestHandler    trieHandlerFunc
   139  	codeRequestHandler    codeHandlerFunc
   140  	term                  func()
   141  
   142  	// counters
   143  	nAccountRequests  int
   144  	nStorageRequests  int
   145  	nBytecodeRequests int
   146  	nTrienodeRequests int
   147  }
   148  
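         // newTestPeer creates a testPeer with the default, well-behaving request handlers installed.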
   149  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   150  	peer := &testPeer{
   151  		id:                    id,
   152  		test:                  t,
   153  		logger:                log.New("id", id),
   154  		accountRequestHandler: defaultAccountRequestHandler,
   155  		trieRequestHandler:    defaultTrieRequestHandler,
   156  		storageRequestHandler: defaultStorageRequestHandler,
   157  		codeRequestHandler:    defaultCodeRequestHandler,
   158  		term:                  term,
   159  	}
   160  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   161  	//peer.logger.SetHandler(stderrHandler)
   162  	return peer
   163  }
   164  
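         // setStorageTries installs copies of the given storage tries on the peer.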
   165  func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
   166  	t.storageTries = make(map[common.Hash]*trie.Trie)
   167  	for root, trie := range tries {
   168  		t.storageTries[root] = trie.Copy()
   169  	}
   170  }
   171  
   172  func (t *testPeer) ID() string      { return t.id }
   173  func (t *testPeer) Log() log.Logger { return t.logger }
   174  
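         // Stats returns a textual summary of the peer's request counters.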
   175  func (t *testPeer) Stats() string {
   176  	return fmt.Sprintf(`Account requests: %d
   177  Storage requests: %d
   178  Bytecode requests: %d
   179  Trienode requests: %d
   180  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   181  }
   182  
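         // The Request* methods below implement the peer interface used by the syncer: each
         // bumps the matching counter and dispatches the request asynchronously to the
         // currently installed handler.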
   183  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   184  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   185  	t.nAccountRequests++
   186  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   187  	return nil
   188  }
   189  
   190  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   191  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   192  	t.nTrienodeRequests++
   193  	go t.trieRequestHandler(t, id, root, paths, bytes)
   194  	return nil
   195  }
   196  
   197  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   198  	t.nStorageRequests++
   199  	if len(accounts) == 1 && origin != nil {
   200  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   201  	} else {
   202  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   203  	}
   204  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   205  	return nil
   206  }
   207  
   208  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   209  	t.nBytecodeRequests++
   210  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   211  	go t.codeRequestHandler(t, id, hashes, bytes)
   212  	return nil
   213  }
   214  
   215  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   216  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   217  	// Pass the response
   218  	var nodes [][]byte
   219  	for _, pathset := range paths {
   220  		switch len(pathset) {
   221  		case 1:
   222  			blob, _, err := t.accountTrie.GetNode(pathset[0])
   223  			if err != nil {
   224  				t.logger.Info("Error handling req", "error", err)
   225  				break
   226  			}
   227  			nodes = append(nodes, blob)
   228  		default:
   229  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   230  			for _, path := range pathset[1:] {
   231  				blob, _, err := account.GetNode(path)
   232  				if err != nil {
   233  					t.logger.Info("Error handling req", "error", err)
   234  					break
   235  				}
   236  				nodes = append(nodes, blob)
   237  			}
   238  		}
   239  	}
   240  	t.remote.OnTrieNodes(t, requestId, nodes)
   241  	return nil
   242  }
   243  
   244  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   245  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   246  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   247  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   248  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   249  		t.term()
   250  		return err
   251  	}
   252  	return nil
   253  }
   254  
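         // createAccountRequestResponse collects the accounts between origin and limit up to
         // roughly cap bytes, along with boundary proofs for the origin and the last returned key.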
   255  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   256  	var size uint64
   257  	if limit == (common.Hash{}) {
   258  		limit = common.MaxHash
   259  	}
   260  	for _, entry := range t.accountValues {
   261  		if size > cap {
   262  			break
   263  		}
   264  		if bytes.Compare(origin[:], entry.k) <= 0 {
   265  			keys = append(keys, common.BytesToHash(entry.k))
   266  			vals = append(vals, entry.v)
   267  			size += uint64(32 + len(entry.v))
   268  		}
   269  		// If we've exceeded the request threshold, abort
   270  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   271  			break
   272  		}
   273  	}
   274  	// Unless we send the entire trie, we need to supply proofs
   275  	// Actually, we need to supply proofs either way! This seems to be an implementation
   276  	// quirk in go-ethereum
   277  	proof := trienode.NewProofSet()
   278  	if err := t.accountTrie.Prove(origin[:], proof); err != nil {
   279  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   280  	}
   281  	if len(keys) > 0 {
   282  		lastK := (keys[len(keys)-1])[:]
   283  		if err := t.accountTrie.Prove(lastK, proof); err != nil {
   284  			t.logger.Error("Could not prove last item", "error", err)
   285  		}
   286  	}
   287  	for _, blob := range proof.List() {
   288  		proofs = append(proofs, blob)
   289  	}
   290  	return keys, vals, proofs
   291  }
   292  
   293  // defaultStorageRequestHandler is a well-behaving storage request handler
   294  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   295  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   296  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   297  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   298  		t.term()
   299  	}
   300  	return nil
   301  }
   302  
   303  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   304  	var bytecodes [][]byte
   305  	for _, h := range hashes {
   306  		bytecodes = append(bytecodes, getCodeByHash(h))
   307  	}
   308  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   309  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   310  		t.term()
   311  	}
   312  	return nil
   313  }
   314  
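         // createStorageRequestResponse collects storage slots for the requested accounts up to
         // roughly max bytes, attaching boundary proofs whenever a range is truncated or starts
         // at a non-zero origin.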
   315  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   316  	var size uint64
   317  	for _, account := range accounts {
   318  		// The first account might start from a different origin and end sooner
   319  		var originHash common.Hash
   320  		if len(origin) > 0 {
   321  			originHash = common.BytesToHash(origin)
   322  		}
   323  		var limitHash = common.MaxHash
   324  		if len(limit) > 0 {
   325  			limitHash = common.BytesToHash(limit)
   326  		}
   327  		var (
   328  			keys  []common.Hash
   329  			vals  [][]byte
   330  			abort bool
   331  		)
   332  		for _, entry := range t.storageValues[account] {
   333  			if size >= max {
   334  				abort = true
   335  				break
   336  			}
   337  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   338  				continue
   339  			}
   340  			keys = append(keys, common.BytesToHash(entry.k))
   341  			vals = append(vals, entry.v)
   342  			size += uint64(32 + len(entry.v))
   343  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   344  				break
   345  			}
   346  		}
   347  		if len(keys) > 0 {
   348  			hashes = append(hashes, keys)
   349  			slots = append(slots, vals)
   350  		}
   351  		// Generate the Merkle proofs for the first and last storage slot, but
    352  		// only if the response was capped. If the entire storage trie is included
    353  		// in the response, there is no need for any proofs.
   354  		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
   355  			// If we're aborting, we need to prove the first and last item
   356  			// This terminates the response (and thus the loop)
   357  			proof := trienode.NewProofSet()
   358  			stTrie := t.storageTries[account]
   359  
   360  			// Here's a potential gotcha: when constructing the proof, we cannot
   361  			// use the 'origin' slice directly, but must use the full 32-byte
   362  			// hash form.
   363  			if err := stTrie.Prove(originHash[:], proof); err != nil {
   364  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   365  			}
   366  			if len(keys) > 0 {
   367  				lastK := (keys[len(keys)-1])[:]
   368  				if err := stTrie.Prove(lastK, proof); err != nil {
   369  					t.logger.Error("Could not prove last item", "error", err)
   370  				}
   371  			}
   372  			for _, blob := range proof.List() {
   373  				proofs = append(proofs, blob)
   374  			}
   375  			break
   376  		}
   377  	}
   378  	return hashes, slots, proofs
   379  }
   380  
    381  // createStorageRequestResponseAlwaysProve tests a corner case where the peer always
    382  // supplies the proof for the last account, even if it is 'complete'.
   383  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   384  	var size uint64
   385  	max = max * 3 / 4
   386  
   387  	var origin common.Hash
   388  	if len(bOrigin) > 0 {
   389  		origin = common.BytesToHash(bOrigin)
   390  	}
   391  	var exit bool
   392  	for i, account := range accounts {
   393  		var keys []common.Hash
   394  		var vals [][]byte
   395  		for _, entry := range t.storageValues[account] {
   396  			if bytes.Compare(entry.k, origin[:]) < 0 {
   397  				exit = true
   398  			}
   399  			keys = append(keys, common.BytesToHash(entry.k))
   400  			vals = append(vals, entry.v)
   401  			size += uint64(32 + len(entry.v))
   402  			if size > max {
   403  				exit = true
   404  			}
   405  		}
   406  		if i == len(accounts)-1 {
   407  			exit = true
   408  		}
   409  		hashes = append(hashes, keys)
   410  		slots = append(slots, vals)
   411  
   412  		if exit {
   413  			// If we're aborting, we need to prove the first and last item
   414  			// This terminates the response (and thus the loop)
   415  			proof := trienode.NewProofSet()
   416  			stTrie := t.storageTries[account]
   417  
   418  			// Here's a potential gotcha: when constructing the proof, we cannot
   419  			// use the 'origin' slice directly, but must use the full 32-byte
   420  			// hash form.
   421  			if err := stTrie.Prove(origin[:], proof); err != nil {
   422  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   423  					"error", err)
   424  			}
   425  			if len(keys) > 0 {
   426  				lastK := (keys[len(keys)-1])[:]
   427  				if err := stTrie.Prove(lastK, proof); err != nil {
   428  					t.logger.Error("Could not prove last item", "error", err)
   429  				}
   430  			}
   431  			for _, blob := range proof.List() {
   432  				proofs = append(proofs, blob)
   433  			}
   434  			break
   435  		}
   436  	}
   437  	return hashes, slots, proofs
   438  }
   439  
    440  // emptyRequestAccountRangeFn answers AccountRangeRequests with an empty response
   441  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   442  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   443  	return nil
   444  }
   445  
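         // nonResponsiveRequestAccountRangeFn never delivers a response.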
   446  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   447  	return nil
   448  }
   449  
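         // emptyTrieRequestHandler answers trie-node requests with an empty response.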
   450  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   451  	t.remote.OnTrieNodes(t, requestId, nil)
   452  	return nil
   453  }
   454  
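         // nonResponsiveTrieRequestHandler never delivers a response.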
   455  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   456  	return nil
   457  }
   458  
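         // emptyStorageRequestHandler answers storage requests with an empty response.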
   459  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   460  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   461  	return nil
   462  }
   463  
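         // nonResponsiveStorageRequestHandler never delivers a response.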
   464  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   465  	return nil
   466  }
   467  
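         // proofHappyStorageRequestHandler always attaches proofs, even when the returned
         // storage range is complete.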
   468  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   469  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   470  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   471  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   472  		t.term()
   473  	}
   474  	return nil
   475  }
   476  
   477  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   478  //	var bytecodes [][]byte
   479  //	t.remote.OnByteCodes(t, id, bytecodes)
   480  //	return nil
   481  //}
   482  
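         // corruptCodeRequestHandler sends back the requested hashes instead of the bytecodes,
         // so the delivery is expected to be rejected.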
   483  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   484  	var bytecodes [][]byte
   485  	for _, h := range hashes {
   486  		// Send back the hashes
   487  		bytecodes = append(bytecodes, h[:])
   488  	}
   489  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   490  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   491  		// Mimic the real-life handler, which drops a peer on errors
   492  		t.remote.Unregister(t.id)
   493  	}
   494  	return nil
   495  }
   496  
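         // cappedCodeRequestHandler only ever delivers the first requested bytecode, forcing
         // the syncer to re-request the remainder.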
   497  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   498  	var bytecodes [][]byte
   499  	for _, h := range hashes[:1] {
   500  		bytecodes = append(bytecodes, getCodeByHash(h))
   501  	}
   502  	// Missing bytecode can be retrieved again, no error expected
   503  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   504  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   505  		t.term()
   506  	}
   507  	return nil
   508  }
   509  
    510  // starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to be very small
   511  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   512  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   513  }
   514  
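         // starvingAccountRequestHandler is the default account handler, but with a very small
         // (500 byte) response cap.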
   515  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   516  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   517  }
   518  
   519  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   520  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   521  //}
   522  
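         // corruptAccountRequestHandler drops the first proof element from an otherwise valid
         // response, so the delivery fails verification.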
   523  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   524  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   525  	if len(proofs) > 0 {
   526  		proofs = proofs[1:]
   527  	}
   528  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   529  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   530  		// Mimic the real-life handler, which drops a peer on errors
   531  		t.remote.Unregister(t.id)
   532  	}
   533  	return nil
   534  }
   535  
   536  // corruptStorageRequestHandler doesn't provide good proofs
   537  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   538  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   539  	if len(proofs) > 0 {
   540  		proofs = proofs[1:]
   541  	}
   542  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   543  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   544  		// Mimic the real-life handler, which drops a peer on errors
   545  		t.remote.Unregister(t.id)
   546  	}
   547  	return nil
   548  }
   549  
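         // noProofStorageRequestHandler delivers storage ranges without any proofs.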
   550  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   551  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   552  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   553  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   554  		// Mimic the real-life handler, which drops a peer on errors
   555  		t.remote.Unregister(t.id)
   556  	}
   557  	return nil
   558  }
   559  
   560  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   561  // also ship the entire trie inside the proof. If the attack is successful,
   562  // the remote side does not do any follow-up requests
   563  func TestSyncBloatedProof(t *testing.T) {
   564  	t.Parallel()
   565  
   566  	testSyncBloatedProof(t, rawdb.HashScheme)
   567  	testSyncBloatedProof(t, rawdb.PathScheme)
   568  }
   569  
   570  func testSyncBloatedProof(t *testing.T, scheme string) {
   571  	var (
   572  		once   sync.Once
   573  		cancel = make(chan struct{})
   574  		term   = func() {
   575  			once.Do(func() {
   576  				close(cancel)
   577  			})
   578  		}
   579  	)
   580  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   581  	source := newTestPeer("source", t, term)
   582  	source.accountTrie = sourceAccountTrie.Copy()
   583  	source.accountValues = elems
   584  
   585  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   586  		var (
   587  			proofs [][]byte
   588  			keys   []common.Hash
   589  			vals   [][]byte
   590  		)
   591  		// The values
   592  		for _, entry := range t.accountValues {
   593  			if bytes.Compare(entry.k, origin[:]) < 0 {
   594  				continue
   595  			}
   596  			if bytes.Compare(entry.k, limit[:]) > 0 {
   597  				continue
   598  			}
   599  			keys = append(keys, common.BytesToHash(entry.k))
   600  			vals = append(vals, entry.v)
   601  		}
   602  		// The proofs
   603  		proof := trienode.NewProofSet()
   604  		if err := t.accountTrie.Prove(origin[:], proof); err != nil {
   605  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   606  		}
   607  		// The bloat: add proof of every single element
   608  		for _, entry := range t.accountValues {
   609  			if err := t.accountTrie.Prove(entry.k, proof); err != nil {
   610  				t.logger.Error("Could not prove item", "error", err)
   611  			}
   612  		}
   613  		// And remove one item from the elements
   614  		if len(keys) > 2 {
   615  			keys = append(keys[:1], keys[2:]...)
   616  			vals = append(vals[:1], vals[2:]...)
   617  		}
   618  		for _, blob := range proof.List() {
   619  			proofs = append(proofs, blob)
   620  		}
   621  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   622  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   623  			t.term()
   624  			// This is actually correct, signal to exit the test successfully
   625  		}
   626  		return nil
   627  	}
   628  	syncer := setupSyncer(nodeScheme, source)
   629  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   630  		t.Fatal("No error returned from incomplete/cancelled sync")
   631  	}
   632  }
   633  
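         // setupSyncer creates a syncer backed by a fresh in-memory database and registers the
         // given test peers with it. A typical test wires things up roughly like this (see
         // testSync below):
         //
         //	nodeScheme, accTrie, elems := makeAccountTrieNoStorage(100, scheme)
         //	peer := newTestPeer("source", t, term)
         //	peer.accountTrie = accTrie.Copy()
         //	peer.accountValues = elems
         //	syncer := setupSyncer(nodeScheme, peer)
         //	err := syncer.Sync(accTrie.Hash(), cancel)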
   634  func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
   635  	stateDb := rawdb.NewMemoryDatabase()
   636  	syncer := NewSyncer(stateDb, scheme)
   637  	for _, peer := range peers {
   638  		syncer.Register(peer)
   639  		peer.remote = syncer
   640  	}
   641  	return syncer
   642  }
   643  
   644  // TestSync tests a basic sync with one peer
   645  func TestSync(t *testing.T) {
   646  	t.Parallel()
   647  
   648  	testSync(t, rawdb.HashScheme)
   649  	testSync(t, rawdb.PathScheme)
   650  }
   651  
   652  func testSync(t *testing.T, scheme string) {
   653  	var (
   654  		once   sync.Once
   655  		cancel = make(chan struct{})
   656  		term   = func() {
   657  			once.Do(func() {
   658  				close(cancel)
   659  			})
   660  		}
   661  	)
   662  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   663  
   664  	mkSource := func(name string) *testPeer {
   665  		source := newTestPeer(name, t, term)
   666  		source.accountTrie = sourceAccountTrie.Copy()
   667  		source.accountValues = elems
   668  		return source
   669  	}
   670  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   671  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   672  		t.Fatalf("sync failed: %v", err)
   673  	}
   674  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   675  }
   676  
   677  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   678  // panic within the prover
   679  func TestSyncTinyTriePanic(t *testing.T) {
   680  	t.Parallel()
   681  
   682  	testSyncTinyTriePanic(t, rawdb.HashScheme)
   683  	testSyncTinyTriePanic(t, rawdb.PathScheme)
   684  }
   685  
   686  func testSyncTinyTriePanic(t *testing.T, scheme string) {
   687  	var (
   688  		once   sync.Once
   689  		cancel = make(chan struct{})
   690  		term   = func() {
   691  			once.Do(func() {
   692  				close(cancel)
   693  			})
   694  		}
   695  	)
   696  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme)
   697  
   698  	mkSource := func(name string) *testPeer {
   699  		source := newTestPeer(name, t, term)
   700  		source.accountTrie = sourceAccountTrie.Copy()
   701  		source.accountValues = elems
   702  		return source
   703  	}
   704  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   705  	done := checkStall(t, term)
   706  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   707  		t.Fatalf("sync failed: %v", err)
   708  	}
   709  	close(done)
   710  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   711  }
   712  
   713  // TestMultiSync tests a basic sync with multiple peers
   714  func TestMultiSync(t *testing.T) {
   715  	t.Parallel()
   716  
   717  	testMultiSync(t, rawdb.HashScheme)
   718  	testMultiSync(t, rawdb.PathScheme)
   719  }
   720  
   721  func testMultiSync(t *testing.T, scheme string) {
   722  	var (
   723  		once   sync.Once
   724  		cancel = make(chan struct{})
   725  		term   = func() {
   726  			once.Do(func() {
   727  				close(cancel)
   728  			})
   729  		}
   730  	)
   731  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
   732  
   733  	mkSource := func(name string) *testPeer {
   734  		source := newTestPeer(name, t, term)
   735  		source.accountTrie = sourceAccountTrie.Copy()
   736  		source.accountValues = elems
   737  		return source
   738  	}
   739  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
   740  	done := checkStall(t, term)
   741  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   742  		t.Fatalf("sync failed: %v", err)
   743  	}
   744  	close(done)
   745  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   746  }
   747  
    748  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   749  func TestSyncWithStorage(t *testing.T) {
   750  	t.Parallel()
   751  
   752  	testSyncWithStorage(t, rawdb.HashScheme)
   753  	testSyncWithStorage(t, rawdb.PathScheme)
   754  }
   755  
   756  func testSyncWithStorage(t *testing.T, scheme string) {
   757  	var (
   758  		once   sync.Once
   759  		cancel = make(chan struct{})
   760  		term   = func() {
   761  			once.Do(func() {
   762  				close(cancel)
   763  			})
   764  		}
   765  	)
   766  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)
   767  
   768  	mkSource := func(name string) *testPeer {
   769  		source := newTestPeer(name, t, term)
   770  		source.accountTrie = sourceAccountTrie.Copy()
   771  		source.accountValues = elems
   772  		source.setStorageTries(storageTries)
   773  		source.storageValues = storageElems
   774  		return source
   775  	}
   776  	syncer := setupSyncer(scheme, mkSource("sourceA"))
   777  	done := checkStall(t, term)
   778  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   779  		t.Fatalf("sync failed: %v", err)
   780  	}
   781  	close(done)
   782  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   783  }
   784  
    785  // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
   786  func TestMultiSyncManyUseless(t *testing.T) {
   787  	t.Parallel()
   788  
   789  	testMultiSyncManyUseless(t, rawdb.HashScheme)
   790  	testMultiSyncManyUseless(t, rawdb.PathScheme)
   791  }
   792  
   793  func testMultiSyncManyUseless(t *testing.T, scheme string) {
   794  	var (
   795  		once   sync.Once
   796  		cancel = make(chan struct{})
   797  		term   = func() {
   798  			once.Do(func() {
   799  				close(cancel)
   800  			})
   801  		}
   802  	)
   803  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   804  
   805  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   806  		source := newTestPeer(name, t, term)
   807  		source.accountTrie = sourceAccountTrie.Copy()
   808  		source.accountValues = elems
   809  		source.setStorageTries(storageTries)
   810  		source.storageValues = storageElems
   811  
   812  		if !noAccount {
   813  			source.accountRequestHandler = emptyRequestAccountRangeFn
   814  		}
   815  		if !noStorage {
   816  			source.storageRequestHandler = emptyStorageRequestHandler
   817  		}
   818  		if !noTrieNode {
   819  			source.trieRequestHandler = emptyTrieRequestHandler
   820  		}
   821  		return source
   822  	}
   823  
   824  	syncer := setupSyncer(
   825  		scheme,
   826  		mkSource("full", true, true, true),
   827  		mkSource("noAccounts", false, true, true),
   828  		mkSource("noStorage", true, false, true),
   829  		mkSource("noTrie", true, true, false),
   830  	)
   831  	done := checkStall(t, term)
   832  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   833  		t.Fatalf("sync failed: %v", err)
   834  	}
   835  	close(done)
   836  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   837  }
   838  
    839  // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
   840  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   841  	t.Parallel()
   842  
   843  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme)
   844  	testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme)
   845  }
   846  
   847  func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
   848  	var (
   849  		once   sync.Once
   850  		cancel = make(chan struct{})
   851  		term   = func() {
   852  			once.Do(func() {
   853  				close(cancel)
   854  			})
   855  		}
   856  	)
   857  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   858  
   859  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   860  		source := newTestPeer(name, t, term)
   861  		source.accountTrie = sourceAccountTrie.Copy()
   862  		source.accountValues = elems
   863  		source.setStorageTries(storageTries)
   864  		source.storageValues = storageElems
   865  
   866  		if !noAccount {
   867  			source.accountRequestHandler = emptyRequestAccountRangeFn
   868  		}
   869  		if !noStorage {
   870  			source.storageRequestHandler = emptyStorageRequestHandler
   871  		}
   872  		if !noTrieNode {
   873  			source.trieRequestHandler = emptyTrieRequestHandler
   874  		}
   875  		return source
   876  	}
   877  
   878  	syncer := setupSyncer(
   879  		scheme,
   880  		mkSource("full", true, true, true),
   881  		mkSource("noAccounts", false, true, true),
   882  		mkSource("noStorage", true, false, true),
   883  		mkSource("noTrie", true, true, false),
   884  	)
    885  	// We're setting the timeout very low to increase the chance of the timeout
    886  	// being triggered. This was previously a cause of panic, when a response
    887  	// arrived at the same time as a timeout was triggered.
   888  	syncer.rates.OverrideTTLLimit = time.Millisecond
   889  
   890  	done := checkStall(t, term)
   891  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   892  		t.Fatalf("sync failed: %v", err)
   893  	}
   894  	close(done)
   895  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   896  }
   897  
    898  // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
   899  func TestMultiSyncManyUnresponsive(t *testing.T) {
   900  	t.Parallel()
   901  
   902  	testMultiSyncManyUnresponsive(t, rawdb.HashScheme)
   903  	testMultiSyncManyUnresponsive(t, rawdb.PathScheme)
   904  }
   905  
   906  func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
   907  	var (
   908  		once   sync.Once
   909  		cancel = make(chan struct{})
   910  		term   = func() {
   911  			once.Do(func() {
   912  				close(cancel)
   913  			})
   914  		}
   915  	)
   916  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
   917  
   918  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   919  		source := newTestPeer(name, t, term)
   920  		source.accountTrie = sourceAccountTrie.Copy()
   921  		source.accountValues = elems
   922  		source.setStorageTries(storageTries)
   923  		source.storageValues = storageElems
   924  
   925  		if !noAccount {
   926  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   927  		}
   928  		if !noStorage {
   929  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   930  		}
   931  		if !noTrieNode {
   932  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   933  		}
   934  		return source
   935  	}
   936  
   937  	syncer := setupSyncer(
   938  		scheme,
   939  		mkSource("full", true, true, true),
   940  		mkSource("noAccounts", false, true, true),
   941  		mkSource("noStorage", true, false, true),
   942  		mkSource("noTrie", true, true, false),
   943  	)
    944  	// We're setting the timeout very low to make the test run a bit faster
   945  	syncer.rates.OverrideTTLLimit = time.Millisecond
   946  
   947  	done := checkStall(t, term)
   948  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   949  		t.Fatalf("sync failed: %v", err)
   950  	}
   951  	close(done)
   952  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
   953  }
   954  
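         // checkStall spawns a watchdog that calls term if the sync has not finished within a
         // minute; closing the returned channel stops the watchdog.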
   955  func checkStall(t *testing.T, term func()) chan struct{} {
   956  	testDone := make(chan struct{})
   957  	go func() {
   958  		select {
   959  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   960  			t.Log("Sync stalled")
   961  			term()
   962  		case <-testDone:
   963  			return
   964  		}
   965  	}()
   966  	return testDone
   967  }
   968  
   969  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   970  // account trie has a few boundary elements.
   971  func TestSyncBoundaryAccountTrie(t *testing.T) {
   972  	t.Parallel()
   973  
   974  	testSyncBoundaryAccountTrie(t, rawdb.HashScheme)
   975  	testSyncBoundaryAccountTrie(t, rawdb.PathScheme)
   976  }
   977  
   978  func testSyncBoundaryAccountTrie(t *testing.T, scheme string) {
   979  	var (
   980  		once   sync.Once
   981  		cancel = make(chan struct{})
   982  		term   = func() {
   983  			once.Do(func() {
   984  				close(cancel)
   985  			})
   986  		}
   987  	)
   988  	nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000)
   989  
   990  	mkSource := func(name string) *testPeer {
   991  		source := newTestPeer(name, t, term)
   992  		source.accountTrie = sourceAccountTrie.Copy()
   993  		source.accountValues = elems
   994  		return source
   995  	}
   996  	syncer := setupSyncer(
   997  		nodeScheme,
   998  		mkSource("peer-a"),
   999  		mkSource("peer-b"),
  1000  	)
  1001  	done := checkStall(t, term)
  1002  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1003  		t.Fatalf("sync failed: %v", err)
  1004  	}
  1005  	close(done)
  1006  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1007  }
  1008  
  1009  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
  1010  // consistently returning very small results
  1011  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
  1012  	t.Parallel()
  1013  
  1014  	testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1015  	testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1016  }
  1017  
  1018  func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1019  	var (
  1020  		once   sync.Once
  1021  		cancel = make(chan struct{})
  1022  		term   = func() {
  1023  			once.Do(func() {
  1024  				close(cancel)
  1025  			})
  1026  		}
  1027  	)
  1028  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1029  
  1030  	mkSource := func(name string, slow bool) *testPeer {
  1031  		source := newTestPeer(name, t, term)
  1032  		source.accountTrie = sourceAccountTrie.Copy()
  1033  		source.accountValues = elems
  1034  
  1035  		if slow {
  1036  			source.accountRequestHandler = starvingAccountRequestHandler
  1037  		}
  1038  		return source
  1039  	}
  1040  
  1041  	syncer := setupSyncer(
  1042  		nodeScheme,
  1043  		mkSource("nice-a", false),
  1044  		mkSource("nice-b", false),
  1045  		mkSource("nice-c", false),
  1046  		mkSource("capped", true),
  1047  	)
  1048  	done := checkStall(t, term)
  1049  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1050  		t.Fatalf("sync failed: %v", err)
  1051  	}
  1052  	close(done)
  1053  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1054  }
  1055  
  1056  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
  1057  // code requests properly.
  1058  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
  1059  	t.Parallel()
  1060  
  1061  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme)
  1062  	testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme)
  1063  }
  1064  
  1065  func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) {
  1066  	var (
  1067  		once   sync.Once
  1068  		cancel = make(chan struct{})
  1069  		term   = func() {
  1070  			once.Do(func() {
  1071  				close(cancel)
  1072  			})
  1073  		}
  1074  	)
  1075  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1076  
  1077  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1078  		source := newTestPeer(name, t, term)
  1079  		source.accountTrie = sourceAccountTrie.Copy()
  1080  		source.accountValues = elems
  1081  		source.codeRequestHandler = codeFn
  1082  		return source
  1083  	}
   1084  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1085  	// chance that the full set of requested codes is sent only to the
   1086  	// non-corrupt peer, which delivers everything in one go and makes the
   1087  	// test moot.
  1088  	syncer := setupSyncer(
  1089  		nodeScheme,
  1090  		mkSource("capped", cappedCodeRequestHandler),
  1091  		mkSource("corrupt", corruptCodeRequestHandler),
  1092  	)
  1093  	done := checkStall(t, term)
  1094  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1095  		t.Fatalf("sync failed: %v", err)
  1096  	}
  1097  	close(done)
  1098  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1099  }
  1100  
  1101  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1102  	t.Parallel()
  1103  
  1104  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme)
  1105  	testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme)
  1106  }
  1107  
  1108  func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
  1109  	var (
  1110  		once   sync.Once
  1111  		cancel = make(chan struct{})
  1112  		term   = func() {
  1113  			once.Do(func() {
  1114  				close(cancel)
  1115  			})
  1116  		}
  1117  	)
  1118  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1119  
  1120  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1121  		source := newTestPeer(name, t, term)
  1122  		source.accountTrie = sourceAccountTrie.Copy()
  1123  		source.accountValues = elems
  1124  		source.accountRequestHandler = accFn
  1125  		return source
  1126  	}
   1127  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1128  	// chance that the full set of requested accounts is sent only to the
   1129  	// non-corrupt peer, which delivers everything in one go and makes the
   1130  	// test moot.
  1131  	syncer := setupSyncer(
  1132  		nodeScheme,
  1133  		mkSource("capped", defaultAccountRequestHandler),
  1134  		mkSource("corrupt", corruptAccountRequestHandler),
  1135  	)
  1136  	done := checkStall(t, term)
  1137  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1138  		t.Fatalf("sync failed: %v", err)
  1139  	}
  1140  	close(done)
  1141  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1142  }
  1143  
  1144  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
  1145  // one by one
  1146  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1147  	t.Parallel()
  1148  
  1149  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme)
  1150  	testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme)
  1151  }
  1152  
  1153  func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
  1154  	var (
  1155  		once   sync.Once
  1156  		cancel = make(chan struct{})
  1157  		term   = func() {
  1158  			once.Do(func() {
  1159  				close(cancel)
  1160  			})
  1161  		}
  1162  	)
  1163  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
  1164  
  1165  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1166  		source := newTestPeer(name, t, term)
  1167  		source.accountTrie = sourceAccountTrie.Copy()
  1168  		source.accountValues = elems
  1169  		source.codeRequestHandler = codeFn
  1170  		return source
  1171  	}
  1172  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1173  	// so it shouldn't be more than that
  1174  	var counter int
  1175  	syncer := setupSyncer(
  1176  		nodeScheme,
  1177  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1178  			counter++
  1179  			return cappedCodeRequestHandler(t, id, hashes, max)
  1180  		}),
  1181  	)
  1182  	done := checkStall(t, term)
  1183  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1184  		t.Fatalf("sync failed: %v", err)
  1185  	}
  1186  	close(done)
  1187  
  1188  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1189  	// deduplication is per request batch. If it were a perfect global dedup,
  1190  	// we would expect only 8 requests. If there were no dedup, there would be
  1191  	// 3k requests.
  1192  	// We expect somewhere below 100 requests for these 8 unique hashes. But
  1193  	// the number can be flaky, so don't limit it so strictly.
  1194  	if threshold := 100; counter > threshold {
  1195  		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
  1196  	}
  1197  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1198  }
  1199  
  1200  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1201  // storage trie has a few boundary elements.
  1202  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1203  	t.Parallel()
  1204  
  1205  	testSyncBoundaryStorageTrie(t, rawdb.HashScheme)
  1206  	testSyncBoundaryStorageTrie(t, rawdb.PathScheme)
  1207  }
  1208  
  1209  func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
  1210  	var (
  1211  		once   sync.Once
  1212  		cancel = make(chan struct{})
  1213  		term   = func() {
  1214  			once.Do(func() {
  1215  				close(cancel)
  1216  			})
  1217  		}
  1218  	)
  1219  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false)
  1220  
  1221  	mkSource := func(name string) *testPeer {
  1222  		source := newTestPeer(name, t, term)
  1223  		source.accountTrie = sourceAccountTrie.Copy()
  1224  		source.accountValues = elems
  1225  		source.setStorageTries(storageTries)
  1226  		source.storageValues = storageElems
  1227  		return source
  1228  	}
  1229  	syncer := setupSyncer(
  1230  		scheme,
  1231  		mkSource("peer-a"),
  1232  		mkSource("peer-b"),
  1233  	)
  1234  	done := checkStall(t, term)
  1235  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1236  		t.Fatalf("sync failed: %v", err)
  1237  	}
  1238  	close(done)
  1239  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1240  }
  1241  
  1242  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1243  // consistently returning very small results
  1244  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1245  	t.Parallel()
  1246  
  1247  	testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme)
  1248  	testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme)
  1249  }
  1250  
  1251  func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
  1252  	var (
  1253  		once   sync.Once
  1254  		cancel = make(chan struct{})
  1255  		term   = func() {
  1256  			once.Do(func() {
  1257  				close(cancel)
  1258  			})
  1259  		}
  1260  	)
  1261  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false)
  1262  
  1263  	mkSource := func(name string, slow bool) *testPeer {
  1264  		source := newTestPeer(name, t, term)
  1265  		source.accountTrie = sourceAccountTrie.Copy()
  1266  		source.accountValues = elems
  1267  		source.setStorageTries(storageTries)
  1268  		source.storageValues = storageElems
  1269  
  1270  		if slow {
  1271  			source.storageRequestHandler = starvingStorageRequestHandler
  1272  		}
  1273  		return source
  1274  	}
  1275  
  1276  	syncer := setupSyncer(
  1277  		scheme,
  1278  		mkSource("nice-a", false),
  1279  		mkSource("slow", true),
  1280  	)
  1281  	done := checkStall(t, term)
  1282  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1283  		t.Fatalf("sync failed: %v", err)
  1284  	}
  1285  	close(done)
  1286  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1287  }
  1288  
  1289  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1290  // sometimes sending bad proofs
  1291  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1292  	t.Parallel()
  1293  
  1294  	testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme)
  1295  	testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme)
  1296  }
  1297  
  1298  func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
  1299  	var (
  1300  		once   sync.Once
  1301  		cancel = make(chan struct{})
  1302  		term   = func() {
  1303  			once.Do(func() {
  1304  				close(cancel)
  1305  			})
  1306  		}
  1307  	)
  1308  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
  1309  
  1310  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1311  		source := newTestPeer(name, t, term)
  1312  		source.accountTrie = sourceAccountTrie.Copy()
  1313  		source.accountValues = elems
  1314  		source.setStorageTries(storageTries)
  1315  		source.storageValues = storageElems
  1316  		source.storageRequestHandler = handler
  1317  		return source
  1318  	}
  1319  
  1320  	syncer := setupSyncer(
  1321  		scheme,
  1322  		mkSource("nice-a", defaultStorageRequestHandler),
  1323  		mkSource("nice-b", defaultStorageRequestHandler),
  1324  		mkSource("nice-c", defaultStorageRequestHandler),
  1325  		mkSource("corrupt", corruptStorageRequestHandler),
  1326  	)
  1327  	done := checkStall(t, term)
  1328  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1329  		t.Fatalf("sync failed: %v", err)
  1330  	}
  1331  	close(done)
  1332  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1333  }
  1334  
  1335  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1336  	t.Parallel()
  1337  
  1338  	testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme)
  1339  	testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme)
  1340  }
  1341  
  1342  func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
  1343  	var (
  1344  		once   sync.Once
  1345  		cancel = make(chan struct{})
  1346  		term   = func() {
  1347  			once.Do(func() {
  1348  				close(cancel)
  1349  			})
  1350  		}
  1351  	)
  1352  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
  1353  
  1354  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1355  		source := newTestPeer(name, t, term)
  1356  		source.accountTrie = sourceAccountTrie.Copy()
  1357  		source.accountValues = elems
  1358  		source.setStorageTries(storageTries)
  1359  		source.storageValues = storageElems
  1360  		source.storageRequestHandler = handler
  1361  		return source
  1362  	}
  1363  	syncer := setupSyncer(
  1364  		scheme,
  1365  		mkSource("nice-a", defaultStorageRequestHandler),
  1366  		mkSource("nice-b", defaultStorageRequestHandler),
  1367  		mkSource("nice-c", defaultStorageRequestHandler),
  1368  		mkSource("corrupt", noProofStorageRequestHandler),
  1369  	)
  1370  	done := checkStall(t, term)
  1371  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1372  		t.Fatalf("sync failed: %v", err)
  1373  	}
  1374  	close(done)
  1375  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1376  }
  1377  
   1378  // TestSyncWithStorageMisbehavingProve tests a basic sync using accounts + storage + code, against
  1379  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1380  // an error, where the recipient erroneously clipped the boundary nodes, but
  1381  // did not mark the account for healing.
  1382  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1383  	t.Parallel()
  1384  
  1385  	testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme)
  1386  	testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme)
  1387  }
  1388  
  1389  func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
  1390  	var (
  1391  		once   sync.Once
  1392  		cancel = make(chan struct{})
  1393  		term   = func() {
  1394  			once.Do(func() {
  1395  				close(cancel)
  1396  			})
  1397  		}
  1398  	)
  1399  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false)
  1400  
  1401  	mkSource := func(name string) *testPeer {
  1402  		source := newTestPeer(name, t, term)
  1403  		source.accountTrie = sourceAccountTrie.Copy()
  1404  		source.accountValues = elems
  1405  		source.setStorageTries(storageTries)
  1406  		source.storageValues = storageElems
  1407  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1408  		return source
  1409  	}
  1410  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
  1411  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1412  		t.Fatalf("sync failed: %v", err)
  1413  	}
  1414  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1415  }
  1416  
   1417  // TestSyncWithUnevenStorage tests sync where the storage trie is not even
   1418  // and has a few empty ranges.
  1419  func TestSyncWithUnevenStorage(t *testing.T) {
  1420  	t.Parallel()
  1421  
  1422  	testSyncWithUnevenStorage(t, rawdb.HashScheme)
  1423  	testSyncWithUnevenStorage(t, rawdb.PathScheme)
  1424  }
  1425  
  1426  func testSyncWithUnevenStorage(t *testing.T, scheme string) {
  1427  	var (
  1428  		once   sync.Once
  1429  		cancel = make(chan struct{})
  1430  		term   = func() {
  1431  			once.Do(func() {
  1432  				close(cancel)
  1433  			})
  1434  		}
  1435  	)
  1436  	accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true)
  1437  
  1438  	mkSource := func(name string) *testPeer {
  1439  		source := newTestPeer(name, t, term)
  1440  		source.accountTrie = accountTrie.Copy()
  1441  		source.accountValues = accounts
  1442  		source.setStorageTries(storageTries)
  1443  		source.storageValues = storageElems
  1444  		source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
  1445  			return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode
  1446  		}
  1447  		return source
  1448  	}
  1449  	syncer := setupSyncer(scheme, mkSource("source"))
  1450  	if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil {
  1451  		t.Fatalf("sync failed: %v", err)
  1452  	}
  1453  	verifyTrie(scheme, syncer.db, accountTrie.Hash(), t)
  1454  }
  1455  
  1456  type kv struct {
  1457  	k, v []byte
  1458  }
  1459  
  1460  func (k *kv) cmp(other *kv) int {
  1461  	return bytes.Compare(k.k, other.k)
  1462  }
  1463  
  1464  func key32(i uint64) []byte {
  1465  	key := make([]byte, 32)
  1466  	binary.LittleEndian.PutUint64(key, i)
  1467  	return key
  1468  }
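
// Hypothetical sketch (not part of the original file): key32 encodes the
// counter little-endian, so byte-wise key ordering differs from numeric
// ordering once the counter exceeds 255. That is why the trie builders below
// re-sort their entries with slices.SortFunc before returning them.
func TestKey32OrderingSketch(t *testing.T) {
	t.Parallel()

	// key32(1) starts with 0x01..., key32(256) starts with 0x00 0x01..., so
	// the numerically larger key sorts first byte-wise.
	if bytes.Compare(key32(256), key32(1)) >= 0 {
		t.Fatal("expected key32(256) to sort before key32(1)")
	}
}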
  1469  
  1470  var (
  1471  	codehashes = []common.Hash{
  1472  		crypto.Keccak256Hash([]byte{0}),
  1473  		crypto.Keccak256Hash([]byte{1}),
  1474  		crypto.Keccak256Hash([]byte{2}),
  1475  		crypto.Keccak256Hash([]byte{3}),
  1476  		crypto.Keccak256Hash([]byte{4}),
  1477  		crypto.Keccak256Hash([]byte{5}),
  1478  		crypto.Keccak256Hash([]byte{6}),
  1479  		crypto.Keccak256Hash([]byte{7}),
  1480  	}
  1481  )
  1482  
  1483  // getCodeHash returns one of a small set of canned code hashes, selected by i
  1484  func getCodeHash(i uint64) []byte {
  1485  	h := codehashes[int(i)%len(codehashes)]
  1486  	return common.CopyBytes(h[:])
  1487  }
  1488  
  1489  // getCodeByHash is a convenience function to look up the code belonging to a code hash
  1490  func getCodeByHash(hash common.Hash) []byte {
  1491  	if hash == types.EmptyCodeHash {
  1492  		return nil
  1493  	}
  1494  	for i, h := range codehashes {
  1495  		if h == hash {
  1496  			return []byte{byte(i)}
  1497  		}
  1498  	}
  1499  	return nil
  1500  }
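
// Hypothetical sketch (not part of the original file): getCodeHash and
// getCodeByHash are inverses for the eight canned code blobs, mapping an
// account index i to the code []byte{byte(i % 8)} and back.
func TestCodeHashRoundTripSketch(t *testing.T) {
	t.Parallel()

	for i := uint64(0); i < 16; i++ {
		code := getCodeByHash(common.BytesToHash(getCodeHash(i)))
		if len(code) != 1 || code[0] != byte(i%8) {
			t.Fatalf("index %d: unexpected code %x", i, code)
		}
	}
}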
  1501  
  1502  // makeAccountTrieNoStorage spits out an account trie without any storage, along with the sorted leaves
  1503  func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) {
  1504  	var (
  1505  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1506  		accTrie = trie.NewEmpty(db)
  1507  		entries []*kv
  1508  	)
  1509  	for i := uint64(1); i <= uint64(n); i++ {
  1510  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1511  			Nonce:    i,
  1512  			Balance:  big.NewInt(int64(i)),
  1513  			Root:     types.EmptyRootHash,
  1514  			CodeHash: getCodeHash(i),
  1515  		})
  1516  		key := key32(i)
  1517  		elem := &kv{key, value}
  1518  		accTrie.MustUpdate(elem.k, elem.v)
  1519  		entries = append(entries, elem)
  1520  	}
  1521  	slices.SortFunc(entries, (*kv).cmp)
  1522  
  1523  	// Commit the state changes into db and re-create the trie
  1524  	// for accessing later.
  1525  	root, nodes, _ := accTrie.Commit(false)
  1526  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
  1527  
  1528  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1529  	return db.Scheme(), accTrie, entries
  1530  }
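
// Hypothetical sketch (not part of the original file) of how the helper above
// is typically consumed: the returned entries are sorted by key and the
// re-opened trie is rooted at the committed, non-empty state.
func TestMakeAccountTrieNoStorageSketch(t *testing.T) {
	t.Parallel()

	_, accTrie, entries := makeAccountTrieNoStorage(16, rawdb.HashScheme)
	if len(entries) != 16 {
		t.Fatalf("have %d entries, want 16", len(entries))
	}
	for i := 1; i < len(entries); i++ {
		if bytes.Compare(entries[i-1].k, entries[i].k) >= 0 {
			t.Fatalf("entries not sorted at index %d", i)
		}
	}
	if accTrie.Hash() == types.EmptyRootHash {
		t.Fatal("expected a non-empty account trie root")
	}
}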
  1531  
  1532  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1533  // accounts normally, this function will fill a few accounts whose keys
  1534  // sit exactly on the sync task boundary hashes.
  1535  func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
  1536  	var (
  1537  		entries    []*kv
  1538  		boundaries []common.Hash
  1539  
  1540  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1541  		accTrie = trie.NewEmpty(db)
  1542  	)
  1543  	// Initialize boundaries
  1544  	var next common.Hash
  1545  	step := new(big.Int).Sub(
  1546  		new(big.Int).Div(
  1547  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1548  			big.NewInt(int64(accountConcurrency)),
  1549  		), common.Big1,
  1550  	)
  1551  	for i := 0; i < accountConcurrency; i++ {
  1552  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1553  		if i == accountConcurrency-1 {
  1554  			last = common.MaxHash
  1555  		}
  1556  		boundaries = append(boundaries, last)
  1557  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1558  	}
  1559  	// Fill boundary accounts
  1560  	for i := 0; i < len(boundaries); i++ {
  1561  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1562  			Nonce:    uint64(0),
  1563  			Balance:  big.NewInt(int64(i)),
  1564  			Root:     types.EmptyRootHash,
  1565  			CodeHash: getCodeHash(uint64(i)),
  1566  		})
  1567  		elem := &kv{boundaries[i].Bytes(), value}
  1568  		accTrie.MustUpdate(elem.k, elem.v)
  1569  		entries = append(entries, elem)
  1570  	}
  1571  	// Fill other accounts if required
  1572  	for i := uint64(1); i <= uint64(n); i++ {
  1573  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1574  			Nonce:    i,
  1575  			Balance:  big.NewInt(int64(i)),
  1576  			Root:     types.EmptyRootHash,
  1577  			CodeHash: getCodeHash(i),
  1578  		})
  1579  		elem := &kv{key32(i), value}
  1580  		accTrie.MustUpdate(elem.k, elem.v)
  1581  		entries = append(entries, elem)
  1582  	}
  1583  	slices.SortFunc(entries, (*kv).cmp)
  1584  
  1585  	// Commit the state changes into db and re-create the trie
  1586  	// for accessing later.
  1587  	root, nodes, _ := accTrie.Commit(false)
  1588  	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
  1589  
  1590  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1591  	return db.Scheme(), accTrie, entries
  1592  }
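
// Hypothetical sketch (not part of the original file): the boundary filling
// above places one account at the end of each chunk the hash space is split
// into, so the last entry in sorted order is always common.MaxHash.
func TestBoundaryAccountTrieSketch(t *testing.T) {
	t.Parallel()

	_, _, entries := makeBoundaryAccountTrie(rawdb.HashScheme, 10)
	if len(entries) == 0 {
		t.Fatal("expected boundary accounts to be present")
	}
	if last := entries[len(entries)-1]; !bytes.Equal(last.k, common.MaxHash.Bytes()) {
		t.Fatalf("expected the last sorted account key to be MaxHash, got %x", last.k)
	}
}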
  1593  
  1594  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1595  // has a unique storage set.
  1596  func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1597  	var (
  1598  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1599  		accTrie        = trie.NewEmpty(db)
  1600  		entries        []*kv
  1601  		storageRoots   = make(map[common.Hash]common.Hash)
  1602  		storageTries   = make(map[common.Hash]*trie.Trie)
  1603  		storageEntries = make(map[common.Hash][]*kv)
  1604  		nodes          = trienode.NewMergedNodeSet()
  1605  	)
  1606  	// Create n accounts in the trie
  1607  	for i := uint64(1); i <= uint64(accounts); i++ {
  1608  		key := key32(i)
  1609  		codehash := types.EmptyCodeHash.Bytes()
  1610  		if code {
  1611  			codehash = getCodeHash(i)
  1612  		}
  1613  		// Create a storage trie
  1614  		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
  1615  		nodes.Merge(stNodes)
  1616  
  1617  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1618  			Nonce:    i,
  1619  			Balance:  big.NewInt(int64(i)),
  1620  			Root:     stRoot,
  1621  			CodeHash: codehash,
  1622  		})
  1623  		elem := &kv{key, value}
  1624  		accTrie.MustUpdate(elem.k, elem.v)
  1625  		entries = append(entries, elem)
  1626  
  1627  		storageRoots[common.BytesToHash(key)] = stRoot
  1628  		storageEntries[common.BytesToHash(key)] = stEntries
  1629  	}
  1630  	slices.SortFunc(entries, (*kv).cmp)
  1631  
  1632  	// Commit account trie
  1633  	root, set, _ := accTrie.Commit(true)
  1634  	nodes.Merge(set)
  1635  
  1636  	// Commit gathered dirty nodes into database
  1637  	db.Update(root, types.EmptyRootHash, 0, nodes, nil)
  1638  
  1639  	// Re-create tries with new root
  1640  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1641  	for i := uint64(1); i <= uint64(accounts); i++ {
  1642  		key := key32(i)
  1643  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1644  		trie, _ := trie.New(id, db)
  1645  		storageTries[common.BytesToHash(key)] = trie
  1646  	}
  1647  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1648  }
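
// Hypothetical sketch (not part of the original file): because every account
// gets its own seed, the helper above produces a distinct storage root per
// account -- the property the "unique storage" sync tests rely on.
func TestUniqueStorageRootsSketch(t *testing.T) {
	t.Parallel()

	_, _, _, storageTries, _ := makeAccountTrieWithStorageWithUniqueStorage(rawdb.HashScheme, 5, 5, false)

	roots := make(map[common.Hash]struct{})
	for _, tr := range storageTries {
		roots[tr.Hash()] = struct{}{}
	}
	if len(roots) != 5 {
		t.Fatalf("have %d distinct storage roots, want 5", len(roots))
	}
}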
  1649  
  1650  // makeAccountTrieWithStorage spits out an account trie with storage, along with the sorted leaves
  1651  func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
  1652  	var (
  1653  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
  1654  		accTrie        = trie.NewEmpty(db)
  1655  		entries        []*kv
  1656  		storageRoots   = make(map[common.Hash]common.Hash)
  1657  		storageTries   = make(map[common.Hash]*trie.Trie)
  1658  		storageEntries = make(map[common.Hash][]*kv)
  1659  		nodes          = trienode.NewMergedNodeSet()
  1660  	)
  1661  	// Create n accounts in the trie
  1662  	for i := uint64(1); i <= uint64(accounts); i++ {
  1663  		key := key32(i)
  1664  		codehash := types.EmptyCodeHash.Bytes()
  1665  		if code {
  1666  			codehash = getCodeHash(i)
  1667  		}
  1668  		// Make a storage trie
  1669  		var (
  1670  			stRoot    common.Hash
  1671  			stNodes   *trienode.NodeSet
  1672  			stEntries []*kv
  1673  		)
  1674  		if boundary {
  1675  			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
  1676  		} else if uneven {
  1677  			stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db)
  1678  		} else {
  1679  			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
  1680  		}
  1681  		nodes.Merge(stNodes)
  1682  
  1683  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1684  			Nonce:    i,
  1685  			Balance:  big.NewInt(int64(i)),
  1686  			Root:     stRoot,
  1687  			CodeHash: codehash,
  1688  		})
  1689  		elem := &kv{key, value}
  1690  		accTrie.MustUpdate(elem.k, elem.v)
  1691  		entries = append(entries, elem)
  1692  
  1693  		// in the default (non-boundary, non-uneven) case the same storage content is reused for all accounts
  1694  		storageRoots[common.BytesToHash(key)] = stRoot
  1695  		storageEntries[common.BytesToHash(key)] = stEntries
  1696  	}
  1697  	slices.SortFunc(entries, (*kv).cmp)
  1698  
  1699  	// Commit account trie
  1700  	root, set, _ := accTrie.Commit(true)
  1701  	nodes.Merge(set)
  1702  
  1703  	// Commit gathered dirty nodes into database
  1704  	db.Update(root, types.EmptyRootHash, 0, nodes, nil)
  1705  
  1706  	// Re-create tries with new root
  1707  	accTrie, err := trie.New(trie.StateTrieID(root), db)
  1708  	if err != nil {
  1709  		panic(err)
  1710  	}
  1711  	for i := uint64(1); i <= uint64(accounts); i++ {
  1712  		key := key32(i)
  1713  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1714  		trie, err := trie.New(id, db)
  1715  		if err != nil {
  1716  			panic(err)
  1717  		}
  1718  		storageTries[common.BytesToHash(key)] = trie
  1719  	}
  1720  	return accTrie, entries, storageTries, storageEntries
  1721  }
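
// Hypothetical sketch (not part of the original file): with both the boundary
// and uneven flags disabled, makeAccountTrieWithStorage seeds every account's
// storage identically (seed 0), so all accounts share one storage root; the
// flags exist to swap in boundary-aligned or unevenly distributed slots.
func TestSharedStorageRootSketch(t *testing.T) {
	t.Parallel()

	_, accounts, storageTries, _ := makeAccountTrieWithStorage(rawdb.HashScheme, 3, 8, false, false, false)
	if len(accounts) != 3 {
		t.Fatalf("have %d accounts, want 3", len(accounts))
	}
	roots := make(map[common.Hash]struct{})
	for _, tr := range storageTries {
		roots[tr.Hash()] = struct{}{}
	}
	if len(roots) != 1 {
		t.Fatalf("have %d distinct storage roots, want 1 (shared seed-0 storage)", len(roots))
	}
}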
  1722  
  1723  // makeStorageTrieWithSeed fills a storage trie with n items, returning the
  1724  // trie root, the dirty node set and the sorted entries. The seed can be used
  1725  // to ensure that tries are unique.
  1726  func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1727  	trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1728  	var entries []*kv
  1729  	for i := uint64(1); i <= n; i++ {
  1730  		// store 'i + seed' at slot 'i'
  1731  		slotValue := key32(i + seed)
  1732  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1733  
  1734  		slotKey := key32(i)
  1735  		key := crypto.Keccak256Hash(slotKey[:])
  1736  
  1737  		elem := &kv{key[:], rlpSlotValue}
  1738  		trie.MustUpdate(elem.k, elem.v)
  1739  		entries = append(entries, elem)
  1740  	}
  1741  	slices.SortFunc(entries, (*kv).cmp)
  1742  	root, nodes, _ := trie.Commit(false)
  1743  	return root, nodes, entries
  1744  }
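
// Hypothetical sketch (not part of the original file): the seed offsets the
// slot values (slot i holds i+seed), so equal seeds reproduce the same root
// while different seeds diverge.
func TestStorageTrieSeedSketch(t *testing.T) {
	t.Parallel()

	db := trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(rawdb.HashScheme))
	owner := common.HexToHash("0xdeadbeef")

	rootA, _, _ := makeStorageTrieWithSeed(owner, 8, 1, db)
	rootB, _, _ := makeStorageTrieWithSeed(owner, 8, 1, db)
	rootC, _, _ := makeStorageTrieWithSeed(owner, 8, 2, db)

	if rootA != rootB {
		t.Fatalf("same seed produced different roots: %x vs %x", rootA, rootB)
	}
	if rootA == rootC {
		t.Fatalf("different seeds produced the same root: %x", rootA)
	}
}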
  1745  
  1746  // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
  1747  // storage slots normally, this function will fill a few slots whose keys
  1748  // sit exactly on the range boundary hashes.
  1749  func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1750  	var (
  1751  		entries    []*kv
  1752  		boundaries []common.Hash
  1753  		trie, _    = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1754  	)
  1755  	// Initialize boundaries
  1756  	var next common.Hash
  1757  	step := new(big.Int).Sub(
  1758  		new(big.Int).Div(
  1759  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1760  			big.NewInt(int64(accountConcurrency)),
  1761  		), common.Big1,
  1762  	)
  1763  	for i := 0; i < accountConcurrency; i++ {
  1764  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1765  		if i == accountConcurrency-1 {
  1766  			last = common.MaxHash
  1767  		}
  1768  		boundaries = append(boundaries, last)
  1769  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1770  	}
  1771  	// Fill boundary slots
  1772  	for i := 0; i < len(boundaries); i++ {
  1773  		key := boundaries[i]
  1774  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1775  
  1776  		elem := &kv{key[:], val}
  1777  		trie.MustUpdate(elem.k, elem.v)
  1778  		entries = append(entries, elem)
  1779  	}
  1780  	// Fill other slots if required
  1781  	for i := uint64(1); i <= uint64(n); i++ {
  1782  		slotKey := key32(i)
  1783  		key := crypto.Keccak256Hash(slotKey[:])
  1784  
  1785  		slotValue := key32(i)
  1786  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1787  
  1788  		elem := &kv{key[:], rlpSlotValue}
  1789  		trie.MustUpdate(elem.k, elem.v)
  1790  		entries = append(entries, elem)
  1791  	}
  1792  	slices.SortFunc(entries, (*kv).cmp)
  1793  	root, nodes, _ := trie.Commit(false)
  1794  	return root, nodes, entries
  1795  }
  1796  
  1797  // makeUnevenStorageTrie constructs a storage trie with its slots distributed
  1798  // unevenly across a few key ranges, leaving the remaining ranges empty.
  1799  func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
  1800  	var (
  1801  		entries []*kv
  1802  		tr, _   = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
  1803  		chosen  = make(map[byte]struct{})
  1804  	)
  1805  	for i := 0; i < 3; i++ {
  1806  		var n int
  1807  		for {
  1808  			n = mrand.Intn(15) // pick from 0x00..0x0e; the last (0x0f..) range is deliberately left empty
  1809  			if _, ok := chosen[byte(n)]; ok {
  1810  				continue
  1811  			}
  1812  			chosen[byte(n)] = struct{}{}
  1813  			break
  1814  		}
  1815  		for j := 0; j < slots/3; j++ {
  1816  			key := append([]byte{byte(n)}, testutil.RandBytes(31)...)
  1817  			val, _ := rlp.EncodeToBytes(testutil.RandBytes(32))
  1818  
  1819  			elem := &kv{key, val}
  1820  			tr.MustUpdate(elem.k, elem.v)
  1821  			entries = append(entries, elem)
  1822  		}
  1823  	}
  1824  	slices.SortFunc(entries, (*kv).cmp)
  1825  	root, nodes, _ := tr.Commit(false)
  1826  	return root, nodes, entries
  1827  }
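
// Hypothetical sketch (not part of the original file): the uneven trie packs
// all slots into at most three randomly chosen first-byte buckets out of
// 0x00..0x0e, so range requests hit both dense regions and the deliberately
// empty 0x0f.. range.
func TestUnevenStorageBucketsSketch(t *testing.T) {
	t.Parallel()

	db := trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(rawdb.HashScheme))
	_, _, entries := makeUnevenStorageTrie(common.HexToHash("0x1"), 30, db)

	buckets := make(map[byte]struct{})
	for _, entry := range entries {
		if entry.k[0] >= 0x0f {
			t.Fatalf("slot key %x landed in the deliberately empty range", entry.k)
		}
		buckets[entry.k[0]] = struct{}{}
	}
	if len(buckets) > 3 {
		t.Fatalf("slots spread over %d buckets, want at most 3", len(buckets))
	}
}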
  1828  
  1829  func verifyTrie(scheme string, db zonddb.KeyValueStore, root common.Hash, t *testing.T) {
  1830  	t.Helper()
  1831  	triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
  1832  	accTrie, err := trie.New(trie.StateTrieID(root), triedb)
  1833  	if err != nil {
  1834  		t.Fatal(err)
  1835  	}
  1836  	accounts, slots := 0, 0
  1837  	accIt := trie.NewIterator(accTrie.MustNodeIterator(nil))
  1838  	for accIt.Next() {
  1839  		var acc struct {
  1840  			Nonce    uint64
  1841  			Balance  *big.Int
  1842  			Root     common.Hash
  1843  			CodeHash []byte
  1844  		}
  1845  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1846  			log.Crit("Invalid account encountered during trie verification", "err", err)
  1847  		}
  1848  		accounts++
  1849  		if acc.Root != types.EmptyRootHash {
  1850  			id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
  1851  			storeTrie, err := trie.NewStateTrie(id, triedb)
  1852  			if err != nil {
  1853  				t.Fatal(err)
  1854  			}
  1855  			storeIt := trie.NewIterator(storeTrie.MustNodeIterator(nil))
  1856  			for storeIt.Next() {
  1857  				slots++
  1858  			}
  1859  			if err := storeIt.Err; err != nil {
  1860  				t.Fatal(err)
  1861  			}
  1862  		}
  1863  	}
  1864  	if err := accIt.Err; err != nil {
  1865  		t.Fatal(err)
  1866  	}
  1867  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1868  }
  1869  
  1870  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1871  // minimizing state healing.
  1872  func TestSyncAccountPerformance(t *testing.T) {
  1873  	// These tests must not run in parallel: they modify the
  1874  	// global var accountConcurrency
  1875  	// t.Parallel()
  1876  
  1877  	testSyncAccountPerformance(t, rawdb.HashScheme)
  1878  	testSyncAccountPerformance(t, rawdb.PathScheme)
  1879  }
  1880  
  1881  func testSyncAccountPerformance(t *testing.T, scheme string) {
  1882  	// Set the account concurrency to 1. This _should_ result in the
  1883  	// range root becoming correct, and no healing should be needed
  1884  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1885  	accountConcurrency = 1
  1886  
  1887  	var (
  1888  		once   sync.Once
  1889  		cancel = make(chan struct{})
  1890  		term   = func() {
  1891  			once.Do(func() {
  1892  				close(cancel)
  1893  			})
  1894  		}
  1895  	)
  1896  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
  1897  
  1898  	mkSource := func(name string) *testPeer {
  1899  		source := newTestPeer(name, t, term)
  1900  		source.accountTrie = sourceAccountTrie.Copy()
  1901  		source.accountValues = elems
  1902  		return source
  1903  	}
  1904  	src := mkSource("source")
  1905  	syncer := setupSyncer(nodeScheme, src)
  1906  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1907  		t.Fatalf("sync failed: %v", err)
  1908  	}
  1909  	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
  1910  	// The trie root will always be requested, since it is added when the snap
  1911  	// sync cycle starts. When popping the queue, we do not look it up again.
  1912  	// Doing so would bring this number down to zero in this artificial testcase,
  1913  	// but only add extra IO for no reason in practice.
  1914  	if have, want := src.nTrienodeRequests, 1; have != want {
  1915  		fmt.Print(src.Stats())
  1916  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1917  	}
  1918  }
  1919  
  1920  func TestSlotEstimation(t *testing.T) {
  1921  	for i, tc := range []struct {
  1922  		last  common.Hash
  1923  		count int
  1924  		want  uint64
  1925  	}{
  1926  		{
  1927  			// Half the space
  1928  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1929  			100,
  1930  			100,
  1931  		},
  1932  		{
  1933  			// 1 / 16th
  1934  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1935  			100,
  1936  			1500,
  1937  		},
  1938  		{
  1939  			// Bit more than 1 / 16th
  1940  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1941  			100,
  1942  			1499,
  1943  		},
  1944  		{
  1945  			// Almost everything
  1946  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1947  			100,
  1948  			6,
  1949  		},
  1950  		{
  1951  			// Almost nothing -- should lead to error
  1952  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1953  			1,
  1954  			0,
  1955  		},
  1956  		{
  1957  			// Nothing -- should lead to error
  1958  			common.Hash{},
  1959  			100,
  1960  			0,
  1961  		},
  1962  	} {
  1963  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1964  		if want := tc.want; have != want {
  1965  			t.Errorf("test %d: have %d want %d", i, have, want)
  1966  		}
  1967  	}
  1968  }
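
// Hypothetical sketch (not part of the original file): the expectations above
// follow simple proportional reasoning -- if `count` slots covered the range
// [0, last], the rest of the 2^256 key space is assumed to be equally dense,
// i.e. remaining ~= count * (MaxHash - last) / (last + 1). For example, with
// last = 0x0fff..ff (1/16th of the space) and 100 slots, roughly 15 * 100 =
// 1500 slots remain. Exact rounding and the error cases are left to
// estimateRemainingSlots itself.
func TestSlotEstimationProportionSketch(t *testing.T) {
	t.Parallel()

	last := common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	count := 100

	covered := new(big.Int).Add(last.Big(), common.Big1)
	left := new(big.Int).Sub(common.MaxHash.Big(), last.Big())
	estimate := new(big.Int).Div(new(big.Int).Mul(big.NewInt(int64(count)), left), covered)

	have, _ := estimateRemainingSlots(count, last)
	if estimate.Uint64() != have {
		t.Fatalf("proportional sketch %d disagrees with estimateRemainingSlots %d", estimate.Uint64(), have)
	}
}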
  1969  
  1970  func newDbConfig(scheme string) *trie.Config {
  1971  	if scheme == rawdb.HashScheme {
  1972  		return &trie.Config{}
  1973  	}
  1974  	return &trie.Config{PathDB: pathdb.Defaults}
  1975  }