github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/eth/protocols/snap/sync_test.go

     1  // Copyright 2021 The adkgo Authors
     2  // This file is part of the adkgo library (adapted for adkgo from go-ethereum v1.10.8).
     3  //
     4  // the adkgo library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // the adkgo library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sort"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/aidoskuneen/adk-node/common"
    31  	"github.com/aidoskuneen/adk-node/core/rawdb"
    32  	"github.com/aidoskuneen/adk-node/core/state"
    33  	"github.com/aidoskuneen/adk-node/crypto"
    34  	"github.com/aidoskuneen/adk-node/ethdb"
    35  	"github.com/aidoskuneen/adk-node/light"
    36  	"github.com/aidoskuneen/adk-node/log"
    37  	"github.com/aidoskuneen/adk-node/rlp"
    38  	"github.com/aidoskuneen/adk-node/trie"
    39  	"golang.org/x/crypto/sha3"
    40  )
    41  
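        // TestHashing verifies that hashing via Sum and hashing via the crypto.KeccakState
        // Read interface produce identical digests for the same inputs.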
    42  func TestHashing(t *testing.T) {
    43  	t.Parallel()
    44  
    45  	var bytecodes = make([][]byte, 10)
    46  	for i := 0; i < len(bytecodes); i++ {
    47  		buf := make([]byte, 100)
    48  		rand.Read(buf)
    49  		bytecodes[i] = buf
    50  	}
    51  	var want, got string
    52  	var old = func() {
    53  		hasher := sha3.NewLegacyKeccak256()
    54  		for i := 0; i < len(bytecodes); i++ {
    55  			hasher.Reset()
    56  			hasher.Write(bytecodes[i])
    57  			hash := hasher.Sum(nil)
    58  			got = fmt.Sprintf("%v\n%v", got, hash)
    59  		}
    60  	}
    61  	var new = func() {
    62  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    63  		var hash = make([]byte, 32)
    64  		for i := 0; i < len(bytecodes); i++ {
    65  			hasher.Reset()
    66  			hasher.Write(bytecodes[i])
    67  			hasher.Read(hash)
    68  			want = fmt.Sprintf("%v\n%v", want, hash)
    69  		}
    70  	}
    71  	old()
    72  	new()
    73  	if want != got {
    74  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    75  	}
    76  }
    77  
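        // BenchmarkHashing compares Sum-based hashing against reading the digest into a
        // preallocated buffer via the crypto.KeccakState interface.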
    78  func BenchmarkHashing(b *testing.B) {
    79  	var bytecodes = make([][]byte, 10000)
    80  	for i := 0; i < len(bytecodes); i++ {
    81  		buf := make([]byte, 100)
    82  		rand.Read(buf)
    83  		bytecodes[i] = buf
    84  	}
    85  	var old = func() {
    86  		hasher := sha3.NewLegacyKeccak256()
    87  		for i := 0; i < len(bytecodes); i++ {
    88  			hasher.Reset()
    89  			hasher.Write(bytecodes[i])
    90  			hasher.Sum(nil)
    91  		}
    92  	}
    93  	var new = func() {
    94  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    95  		var hash = make([]byte, 32)
    96  		for i := 0; i < len(bytecodes); i++ {
    97  			hasher.Reset()
    98  			hasher.Write(bytecodes[i])
    99  			hasher.Read(hash)
   100  		}
   101  	}
   102  	b.Run("old", func(b *testing.B) {
   103  		b.ReportAllocs()
   104  		for i := 0; i < b.N; i++ {
   105  			old()
   106  		}
   107  	})
   108  	b.Run("new", func(b *testing.B) {
   109  		b.ReportAllocs()
   110  		for i := 0; i < b.N; i++ {
   111  			new()
   112  		}
   113  	})
   114  }
   115  
   116  type (
   117  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   118  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   119  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   120  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   121  )
   122  
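        // testPeer is an in-memory mock of a remote snap peer, answering account, storage,
        // trie node and bytecode requests from locally held tries via pluggable handlers.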
   123  type testPeer struct {
   124  	id            string
   125  	test          *testing.T
   126  	remote        *Syncer
   127  	logger        log.Logger
   128  	accountTrie   *trie.Trie
   129  	accountValues entrySlice
   130  	storageTries  map[common.Hash]*trie.Trie
   131  	storageValues map[common.Hash]entrySlice
   132  
   133  	accountRequestHandler accountHandlerFunc
   134  	storageRequestHandler storageHandlerFunc
   135  	trieRequestHandler    trieHandlerFunc
   136  	codeRequestHandler    codeHandlerFunc
   137  	term                  func()
   138  
   139  	// counters
   140  	nAccountRequests  int
   141  	nStorageRequests  int
   142  	nBytecodeRequests int
   143  	nTrienodeRequests int
   144  }
   145  
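        // newTestPeer creates a test peer with the default well-behaving request handlers installed.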
   146  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   147  	peer := &testPeer{
   148  		id:                    id,
   149  		test:                  t,
   150  		logger:                log.New("id", id),
   151  		accountRequestHandler: defaultAccountRequestHandler,
   152  		trieRequestHandler:    defaultTrieRequestHandler,
   153  		storageRequestHandler: defaultStorageRequestHandler,
   154  		codeRequestHandler:    defaultCodeRequestHandler,
   155  		term:                  term,
   156  	}
   157  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   158  	//peer.logger.SetHandler(stderrHandler)
   159  	return peer
   160  }
   161  
   162  func (t *testPeer) ID() string      { return t.id }
   163  func (t *testPeer) Log() log.Logger { return t.logger }
   164  
   165  func (t *testPeer) Stats() string {
   166  	return fmt.Sprintf(`Account requests: %d
   167  Storage requests: %d
   168  Bytecode requests: %d
   169  Trienode requests: %d
   170  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   171  }
   172  
   173  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   174  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   175  	t.nAccountRequests++
   176  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   177  	return nil
   178  }
   179  
   180  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   181  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   182  	t.nTrienodeRequests++
   183  	go t.trieRequestHandler(t, id, root, paths, bytes)
   184  	return nil
   185  }
   186  
   187  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   188  	t.nStorageRequests++
   189  	if len(accounts) == 1 && origin != nil {
   190  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   191  	} else {
   192  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   193  	}
   194  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   195  	return nil
   196  }
   197  
   198  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   199  	t.nBytecodeRequests++
   200  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   201  	go t.codeRequestHandler(t, id, hashes, bytes)
   202  	return nil
   203  }
   204  
   205  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   206  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   207  	// Pass the response
   208  	var nodes [][]byte
   209  	for _, pathset := range paths {
   210  		switch len(pathset) {
   211  		case 1:
   212  			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
   213  			if err != nil {
   214  				t.logger.Info("Error handling req", "error", err)
   215  				break
   216  			}
   217  			nodes = append(nodes, blob)
   218  		default:
   219  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   220  			for _, path := range pathset[1:] {
   221  				blob, _, err := account.TryGetNode(path)
   222  				if err != nil {
   223  					t.logger.Info("Error handling req", "error", err)
   224  					break
   225  				}
   226  				nodes = append(nodes, blob)
   227  			}
   228  		}
   229  	}
   230  	t.remote.OnTrieNodes(t, requestId, nodes)
   231  	return nil
   232  }
   233  
   234  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   235  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   236  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   237  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   238  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   239  		t.term()
   240  		return err
   241  	}
   242  	return nil
   243  }
   244  
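        // createAccountRequestResponse assembles the keys, values and boundary proofs for an
        // account range request, soft-capping the response at roughly 'cap' bytes.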
   245  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   246  	var size uint64
   247  	if limit == (common.Hash{}) {
   248  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   249  	}
   250  	for _, entry := range t.accountValues {
   251  		if size > cap {
   252  			break
   253  		}
   254  		if bytes.Compare(origin[:], entry.k) <= 0 {
   255  			keys = append(keys, common.BytesToHash(entry.k))
   256  			vals = append(vals, entry.v)
   257  			size += uint64(32 + len(entry.v))
   258  		}
   259  		// If we've exceeded the request threshold, abort
   260  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   261  			break
   262  		}
   263  	}
   264  	// Unless we send the entire trie, we need to supply proofs
   265  	// Actually, we need to supply proofs either way! This seems to be an implementation
   266  	// quirk in adkgo
   267  	proof := light.NewNodeSet()
   268  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   269  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   270  	}
   271  	if len(keys) > 0 {
   272  		lastK := (keys[len(keys)-1])[:]
   273  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   274  			t.logger.Error("Could not prove last item", "error", err)
   275  		}
   276  	}
   277  	for _, blob := range proof.NodeList() {
   278  		proofs = append(proofs, blob)
   279  	}
   280  	return keys, vals, proofs
   281  }
   282  
   283  // defaultStorageRequestHandler is a well-behaving storage request handler
   284  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   285  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   286  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   287  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   288  		t.term()
   289  	}
   290  	return nil
   291  }
   292  
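        // defaultCodeRequestHandler is a well-behaving handler for bytecode requests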
   293  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   294  	var bytecodes [][]byte
   295  	for _, h := range hashes {
   296  		bytecodes = append(bytecodes, getCodeByHash(h))
   297  	}
   298  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   299  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   300  		t.term()
   301  	}
   302  	return nil
   303  }
   304  
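        // createStorageRequestResponse assembles the hashes, slots and boundary proofs for a
        // storage range request, proving the first and last elements whenever the response is
        // capped or starts at a non-zero origin.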
   305  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   306  	var size uint64
   307  	for _, account := range accounts {
   308  		// The first account might start from a different origin and end sooner
   309  		var originHash common.Hash
   310  		if len(origin) > 0 {
   311  			originHash = common.BytesToHash(origin)
   312  		}
   313  		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   314  		if len(limit) > 0 {
   315  			limitHash = common.BytesToHash(limit)
   316  		}
   317  		var (
   318  			keys  []common.Hash
   319  			vals  [][]byte
   320  			abort bool
   321  		)
   322  		for _, entry := range t.storageValues[account] {
   323  			if size >= max {
   324  				abort = true
   325  				break
   326  			}
   327  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   328  				continue
   329  			}
   330  			keys = append(keys, common.BytesToHash(entry.k))
   331  			vals = append(vals, entry.v)
   332  			size += uint64(32 + len(entry.v))
   333  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   334  				break
   335  			}
   336  		}
   337  		hashes = append(hashes, keys)
   338  		slots = append(slots, vals)
   339  
   340  		// Generate the Merkle proofs for the first and last storage slot, but
   341  		// only if the response was capped. If the entire storage trie is included
   342  		// in the response, there is no need for any proofs.
   343  		if originHash != (common.Hash{}) || abort {
   344  			// If we're aborting, we need to prove the first and last item
   345  			// This terminates the response (and thus the loop)
   346  			proof := light.NewNodeSet()
   347  			stTrie := t.storageTries[account]
   348  
   349  			// Here's a potential gotcha: when constructing the proof, we cannot
   350  			// use the 'origin' slice directly, but must use the full 32-byte
   351  			// hash form.
   352  			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
   353  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   354  			}
   355  			if len(keys) > 0 {
   356  				lastK := (keys[len(keys)-1])[:]
   357  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   358  					t.logger.Error("Could not prove last item", "error", err)
   359  				}
   360  			}
   361  			for _, blob := range proof.NodeList() {
   362  				proofs = append(proofs, blob)
   363  			}
   364  			break
   365  		}
   366  	}
   367  	return hashes, slots, proofs
   368  }
   369  
   370  // createStorageRequestResponseAlwaysProve tests a corner case, where the response
   371  // always supplies the proof for the last account, even if it is 'complete'.
   372  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   373  	var size uint64
   374  	max = max * 3 / 4
   375  
   376  	var origin common.Hash
   377  	if len(bOrigin) > 0 {
   378  		origin = common.BytesToHash(bOrigin)
   379  	}
   380  	var exit bool
   381  	for i, account := range accounts {
   382  		var keys []common.Hash
   383  		var vals [][]byte
   384  		for _, entry := range t.storageValues[account] {
   385  			if bytes.Compare(entry.k, origin[:]) < 0 {
   386  				exit = true
   387  			}
   388  			keys = append(keys, common.BytesToHash(entry.k))
   389  			vals = append(vals, entry.v)
   390  			size += uint64(32 + len(entry.v))
   391  			if size > max {
   392  				exit = true
   393  			}
   394  		}
   395  		if i == len(accounts)-1 {
   396  			exit = true
   397  		}
   398  		hashes = append(hashes, keys)
   399  		slots = append(slots, vals)
   400  
   401  		if exit {
   402  			// If we're aborting, we need to prove the first and last item
   403  			// This terminates the response (and thus the loop)
   404  			proof := light.NewNodeSet()
   405  			stTrie := t.storageTries[account]
   406  
   407  			// Here's a potential gotcha: when constructing the proof, we cannot
   408  			// use the 'origin' slice directly, but must use the full 32-byte
   409  			// hash form.
   410  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   411  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   412  					"error", err)
   413  			}
   414  			if len(keys) > 0 {
   415  				lastK := (keys[len(keys)-1])[:]
   416  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   417  					t.logger.Error("Could not prove last item", "error", err)
   418  				}
   419  			}
   420  			for _, blob := range proof.NodeList() {
   421  				proofs = append(proofs, blob)
   422  			}
   423  			break
   424  		}
   425  	}
   426  	return hashes, slots, proofs
   427  }
   428  
   429  // emptyRequestAccountRangeFn is a handler that rejects AccountRangeRequests by returning an empty response
   430  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   431  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   432  	return nil
   433  }
   434  
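        // nonResponsiveRequestAccountRangeFn is a handler that never responds to account range requests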
   435  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   436  	return nil
   437  }
   438  
   439  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   440  	t.remote.OnTrieNodes(t, requestId, nil)
   441  	return nil
   442  }
   443  
   444  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   445  	return nil
   446  }
   447  
   448  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   449  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   450  	return nil
   451  }
   452  
   453  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   454  	return nil
   455  }
   456  
   457  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   458  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   459  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   460  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   461  		t.term()
   462  	}
   463  	return nil
   464  }
   465  
   466  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   467  //	var bytecodes [][]byte
   468  //	t.remote.OnByteCodes(t, id, bytecodes)
   469  //	return nil
   470  //}
   471  
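        // corruptCodeRequestHandler delivers the requested hashes themselves instead of the
        // bytecodes, which the remote side is expected to reject.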
   472  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   473  	var bytecodes [][]byte
   474  	for _, h := range hashes {
   475  		// Send back the hashes
   476  		bytecodes = append(bytecodes, h[:])
   477  	}
   478  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   479  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   480  		// Mimic the real-life handler, which drops a peer on errors
   481  		t.remote.Unregister(t.id)
   482  	}
   483  	return nil
   484  }
   485  
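        // cappedCodeRequestHandler only delivers the first requested bytecode; the remaining
        // ones can be re-requested, so no error is expected.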
   486  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   487  	var bytecodes [][]byte
   488  	for _, h := range hashes[:1] {
   489  		bytecodes = append(bytecodes, getCodeByHash(h))
   490  	}
   491  	// Missing bytecode can be retrieved again, no error expected
   492  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   493  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   494  		t.term()
   495  	}
   496  	return nil
   497  }
   498  
   499  // starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to be very small
   500  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   501  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   502  }
   503  
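        // starvingAccountRequestHandler is a somewhat well-behaving account handler, but it caps the returned results at 500 bytes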
   504  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   505  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   506  }
   507  
   508  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   509  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   510  //}
   511  
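        // corruptAccountRequestHandler drops the first proof node from the response, so the
        // delivery is expected to be rejected by the remote side.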
   512  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   513  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   514  	if len(proofs) > 0 {
   515  		proofs = proofs[1:]
   516  	}
   517  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   518  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   519  		// Mimic the real-life handler, which drops a peer on errors
   520  		t.remote.Unregister(t.id)
   521  	}
   522  	return nil
   523  }
   524  
   525  // corruptStorageRequestHandler doesn't provide good proofs
   526  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   527  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   528  	if len(proofs) > 0 {
   529  		proofs = proofs[1:]
   530  	}
   531  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   532  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   533  		// Mimic the real-life handler, which drops a peer on errors
   534  		t.remote.Unregister(t.id)
   535  	}
   536  	return nil
   537  }
   538  
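        // noProofStorageRequestHandler delivers storage ranges without any Merkle proofs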
   539  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   540  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   541  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   542  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   543  		// Mimic the real-life handler, which drops a peer on errors
   544  		t.remote.Unregister(t.id)
   545  	}
   546  	return nil
   547  }
   548  
   549  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   550  // also ship the entire trie inside the proof. If the attack is successful,
   551  // the remote side does not do any follow-up requests
   552  func TestSyncBloatedProof(t *testing.T) {
   553  	t.Parallel()
   554  
   555  	var (
   556  		once   sync.Once
   557  		cancel = make(chan struct{})
   558  		term   = func() {
   559  			once.Do(func() {
   560  				close(cancel)
   561  			})
   562  		}
   563  	)
   564  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   565  	source := newTestPeer("source", t, term)
   566  	source.accountTrie = sourceAccountTrie
   567  	source.accountValues = elems
   568  
   569  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   570  		var (
   571  			proofs [][]byte
   572  			keys   []common.Hash
   573  			vals   [][]byte
   574  		)
   575  		// The values
   576  		for _, entry := range t.accountValues {
   577  			if bytes.Compare(entry.k, origin[:]) < 0 {
   578  				continue
   579  			}
   580  			if bytes.Compare(entry.k, limit[:]) > 0 {
   581  				continue
   582  			}
   583  			keys = append(keys, common.BytesToHash(entry.k))
   584  			vals = append(vals, entry.v)
   585  		}
   586  		// The proofs
   587  		proof := light.NewNodeSet()
   588  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   589  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   590  		}
   591  		// The bloat: add proof of every single element
   592  		for _, entry := range t.accountValues {
   593  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   594  				t.logger.Error("Could not prove item", "error", err)
   595  			}
   596  		}
   597  		// And remove one item from the elements
   598  		if len(keys) > 2 {
   599  			keys = append(keys[:1], keys[2:]...)
   600  			vals = append(vals[:1], vals[2:]...)
   601  		}
   602  		for _, blob := range proof.NodeList() {
   603  			proofs = append(proofs, blob)
   604  		}
   605  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   606  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   607  			t.term()
   608  			// This is actually correct, signal to exit the test successfully
   609  		}
   610  		return nil
   611  	}
   612  	syncer := setupSyncer(source)
   613  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   614  		t.Fatal("No error returned from incomplete/cancelled sync")
   615  	}
   616  }
   617  
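        // setupSyncer creates a Syncer backed by a fresh in-memory database and registers the
        // given test peers with it.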
   618  func setupSyncer(peers ...*testPeer) *Syncer {
   619  	stateDb := rawdb.NewMemoryDatabase()
   620  	syncer := NewSyncer(stateDb)
   621  	for _, peer := range peers {
   622  		syncer.Register(peer)
   623  		peer.remote = syncer
   624  	}
   625  	return syncer
   626  }
   627  
   628  // TestSync tests a basic sync with one peer
   629  func TestSync(t *testing.T) {
   630  	t.Parallel()
   631  
   632  	var (
   633  		once   sync.Once
   634  		cancel = make(chan struct{})
   635  		term   = func() {
   636  			once.Do(func() {
   637  				close(cancel)
   638  			})
   639  		}
   640  	)
   641  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   642  
   643  	mkSource := func(name string) *testPeer {
   644  		source := newTestPeer(name, t, term)
   645  		source.accountTrie = sourceAccountTrie
   646  		source.accountValues = elems
   647  		return source
   648  	}
   649  	syncer := setupSyncer(mkSource("source"))
   650  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   651  		t.Fatalf("sync failed: %v", err)
   652  	}
   653  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   654  }
   655  
   656  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   657  // panic within the prover
   658  func TestSyncTinyTriePanic(t *testing.T) {
   659  	t.Parallel()
   660  
   661  	var (
   662  		once   sync.Once
   663  		cancel = make(chan struct{})
   664  		term   = func() {
   665  			once.Do(func() {
   666  				close(cancel)
   667  			})
   668  		}
   669  	)
   670  	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   671  
   672  	mkSource := func(name string) *testPeer {
   673  		source := newTestPeer(name, t, term)
   674  		source.accountTrie = sourceAccountTrie
   675  		source.accountValues = elems
   676  		return source
   677  	}
   678  	syncer := setupSyncer(mkSource("source"))
   679  	done := checkStall(t, term)
   680  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   681  		t.Fatalf("sync failed: %v", err)
   682  	}
   683  	close(done)
   684  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   685  }
   686  
   687  // TestMultiSync tests a basic sync with multiple peers
   688  func TestMultiSync(t *testing.T) {
   689  	t.Parallel()
   690  
   691  	var (
   692  		once   sync.Once
   693  		cancel = make(chan struct{})
   694  		term   = func() {
   695  			once.Do(func() {
   696  				close(cancel)
   697  			})
   698  		}
   699  	)
   700  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   701  
   702  	mkSource := func(name string) *testPeer {
   703  		source := newTestPeer(name, t, term)
   704  		source.accountTrie = sourceAccountTrie
   705  		source.accountValues = elems
   706  		return source
   707  	}
   708  	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
   709  	done := checkStall(t, term)
   710  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   711  		t.Fatalf("sync failed: %v", err)
   712  	}
   713  	close(done)
   714  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   715  }
   716  
   717  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   718  func TestSyncWithStorage(t *testing.T) {
   719  	t.Parallel()
   720  
   721  	var (
   722  		once   sync.Once
   723  		cancel = make(chan struct{})
   724  		term   = func() {
   725  			once.Do(func() {
   726  				close(cancel)
   727  			})
   728  		}
   729  	)
   730  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
   731  
   732  	mkSource := func(name string) *testPeer {
   733  		source := newTestPeer(name, t, term)
   734  		source.accountTrie = sourceAccountTrie
   735  		source.accountValues = elems
   736  		source.storageTries = storageTries
   737  		source.storageValues = storageElems
   738  		return source
   739  	}
   740  	syncer := setupSyncer(mkSource("sourceA"))
   741  	done := checkStall(t, term)
   742  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   743  		t.Fatalf("sync failed: %v", err)
   744  	}
   745  	close(done)
   746  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   747  }
   748  
   749  // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
   750  func TestMultiSyncManyUseless(t *testing.T) {
   751  	t.Parallel()
   752  
   753  	var (
   754  		once   sync.Once
   755  		cancel = make(chan struct{})
   756  		term   = func() {
   757  			once.Do(func() {
   758  				close(cancel)
   759  			})
   760  		}
   761  	)
   762  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   763  
   764  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   765  		source := newTestPeer(name, t, term)
   766  		source.accountTrie = sourceAccountTrie
   767  		source.accountValues = elems
   768  		source.storageTries = storageTries
   769  		source.storageValues = storageElems
   770  
   771  		if !noAccount {
   772  			source.accountRequestHandler = emptyRequestAccountRangeFn
   773  		}
   774  		if !noStorage {
   775  			source.storageRequestHandler = emptyStorageRequestHandler
   776  		}
   777  		if !noTrieNode {
   778  			source.trieRequestHandler = emptyTrieRequestHandler
   779  		}
   780  		return source
   781  	}
   782  
   783  	syncer := setupSyncer(
   784  		mkSource("full", true, true, true),
   785  		mkSource("noAccounts", false, true, true),
   786  		mkSource("noStorage", true, false, true),
   787  		mkSource("noTrie", true, true, false),
   788  	)
   789  	done := checkStall(t, term)
   790  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   791  		t.Fatalf("sync failed: %v", err)
   792  	}
   793  	close(done)
   794  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   795  }
   796  
   797  // TestMultiSyncManyUselessWithLowTimeout is like TestMultiSyncManyUseless, but with a very low request timeout
   798  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   799  	var (
   800  		once   sync.Once
   801  		cancel = make(chan struct{})
   802  		term   = func() {
   803  			once.Do(func() {
   804  				close(cancel)
   805  			})
   806  		}
   807  	)
   808  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   809  
   810  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   811  		source := newTestPeer(name, t, term)
   812  		source.accountTrie = sourceAccountTrie
   813  		source.accountValues = elems
   814  		source.storageTries = storageTries
   815  		source.storageValues = storageElems
   816  
   817  		if !noAccount {
   818  			source.accountRequestHandler = emptyRequestAccountRangeFn
   819  		}
   820  		if !noStorage {
   821  			source.storageRequestHandler = emptyStorageRequestHandler
   822  		}
   823  		if !noTrieNode {
   824  			source.trieRequestHandler = emptyTrieRequestHandler
   825  		}
   826  		return source
   827  	}
   828  
   829  	syncer := setupSyncer(
   830  		mkSource("full", true, true, true),
   831  		mkSource("noAccounts", false, true, true),
   832  		mkSource("noStorage", true, false, true),
   833  		mkSource("noTrie", true, true, false),
   834  	)
   835  	// We're setting the timeout very low, to increase the chance of the timeout
   836  	// being triggered. This was previously a cause of panic, when a response
   837  	// arrived at the same time as a timeout was triggered.
   838  	syncer.rates.OverrideTTLLimit = time.Millisecond
   839  
   840  	done := checkStall(t, term)
   841  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   842  		t.Fatalf("sync failed: %v", err)
   843  	}
   844  	close(done)
   845  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   846  }
   847  
   848  // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
   849  func TestMultiSyncManyUnresponsive(t *testing.T) {
   850  	var (
   851  		once   sync.Once
   852  		cancel = make(chan struct{})
   853  		term   = func() {
   854  			once.Do(func() {
   855  				close(cancel)
   856  			})
   857  		}
   858  	)
   859  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   860  
   861  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   862  		source := newTestPeer(name, t, term)
   863  		source.accountTrie = sourceAccountTrie
   864  		source.accountValues = elems
   865  		source.storageTries = storageTries
   866  		source.storageValues = storageElems
   867  
   868  		if !noAccount {
   869  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   870  		}
   871  		if !noStorage {
   872  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   873  		}
   874  		if !noTrieNode {
   875  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   876  		}
   877  		return source
   878  	}
   879  
   880  	syncer := setupSyncer(
   881  		mkSource("full", true, true, true),
   882  		mkSource("noAccounts", false, true, true),
   883  		mkSource("noStorage", true, false, true),
   884  		mkSource("noTrie", true, true, false),
   885  	)
   886  	// We're setting the timeout very low, to make the test run a bit faster
   887  	syncer.rates.OverrideTTLLimit = time.Millisecond
   888  
   889  	done := checkStall(t, term)
   890  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   891  		t.Fatalf("sync failed: %v", err)
   892  	}
   893  	close(done)
   894  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   895  }
   896  
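        // checkStall spawns a watchdog which aborts the sync (via term) if it hasn't completed
        // within a minute; closing the returned channel disarms the watchdog.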
   897  func checkStall(t *testing.T, term func()) chan struct{} {
   898  	testDone := make(chan struct{})
   899  	go func() {
   900  		select {
   901  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   902  			t.Log("Sync stalled")
   903  			term()
   904  		case <-testDone:
   905  			return
   906  		}
   907  	}()
   908  	return testDone
   909  }
   910  
   911  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   912  // account trie has a few boundary elements.
   913  func TestSyncBoundaryAccountTrie(t *testing.T) {
   914  	t.Parallel()
   915  
   916  	var (
   917  		once   sync.Once
   918  		cancel = make(chan struct{})
   919  		term   = func() {
   920  			once.Do(func() {
   921  				close(cancel)
   922  			})
   923  		}
   924  	)
   925  	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
   926  
   927  	mkSource := func(name string) *testPeer {
   928  		source := newTestPeer(name, t, term)
   929  		source.accountTrie = sourceAccountTrie
   930  		source.accountValues = elems
   931  		return source
   932  	}
   933  	syncer := setupSyncer(
   934  		mkSource("peer-a"),
   935  		mkSource("peer-b"),
   936  	)
   937  	done := checkStall(t, term)
   938  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   939  		t.Fatalf("sync failed: %v", err)
   940  	}
   941  	close(done)
   942  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   943  }
   944  
   945  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   946  // consistently returning very small results
   947  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   948  	t.Parallel()
   949  
   950  	var (
   951  		once   sync.Once
   952  		cancel = make(chan struct{})
   953  		term   = func() {
   954  			once.Do(func() {
   955  				close(cancel)
   956  			})
   957  		}
   958  	)
   959  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   960  
   961  	mkSource := func(name string, slow bool) *testPeer {
   962  		source := newTestPeer(name, t, term)
   963  		source.accountTrie = sourceAccountTrie
   964  		source.accountValues = elems
   965  
   966  		if slow {
   967  			source.accountRequestHandler = starvingAccountRequestHandler
   968  		}
   969  		return source
   970  	}
   971  
   972  	syncer := setupSyncer(
   973  		mkSource("nice-a", false),
   974  		mkSource("nice-b", false),
   975  		mkSource("nice-c", false),
   976  		mkSource("capped", true),
   977  	)
   978  	done := checkStall(t, term)
   979  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   980  		t.Fatalf("sync failed: %v", err)
   981  	}
   982  	close(done)
   983  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   984  }
   985  
   986  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
   987  // code requests properly.
   988  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
   989  	t.Parallel()
   990  
   991  	var (
   992  		once   sync.Once
   993  		cancel = make(chan struct{})
   994  		term   = func() {
   995  			once.Do(func() {
   996  				close(cancel)
   997  			})
   998  		}
   999  	)
  1000  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1001  
  1002  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1003  		source := newTestPeer(name, t, term)
  1004  		source.accountTrie = sourceAccountTrie
  1005  		source.accountValues = elems
  1006  		source.codeRequestHandler = codeFn
  1007  		return source
  1008  	}
  1009  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1010  	// chance that the full set of codes requested are sent only to the
  1011  	// non-corrupt peer, which delivers everything in one go, and makes the
  1012  	// test moot
  1013  	syncer := setupSyncer(
  1014  		mkSource("capped", cappedCodeRequestHandler),
  1015  		mkSource("corrupt", corruptCodeRequestHandler),
  1016  	)
  1017  	done := checkStall(t, term)
  1018  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1019  		t.Fatalf("sync failed: %v", err)
  1020  	}
  1021  	close(done)
  1022  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1023  }
  1024  
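        // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which doesn't deliver
        // account range responses properly (the proofs are clipped).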
  1025  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1026  	t.Parallel()
  1027  
  1028  	var (
  1029  		once   sync.Once
  1030  		cancel = make(chan struct{})
  1031  		term   = func() {
  1032  			once.Do(func() {
  1033  				close(cancel)
  1034  			})
  1035  		}
  1036  	)
  1037  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1038  
  1039  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1040  		source := newTestPeer(name, t, term)
  1041  		source.accountTrie = sourceAccountTrie
  1042  		source.accountValues = elems
  1043  		source.accountRequestHandler = accFn
  1044  		return source
  1045  	}
  1046  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
  1047  	// chance that the full set of codes requested are sent only to the
  1048  	// non-corrupt peer, which delivers everything in one go, and makes the
  1049  	// test moot
  1050  	syncer := setupSyncer(
  1051  		mkSource("capped", defaultAccountRequestHandler),
  1052  		mkSource("corrupt", corruptAccountRequestHandler),
  1053  	)
  1054  	done := checkStall(t, term)
  1055  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1056  		t.Fatalf("sync failed: %v", err)
  1057  	}
  1058  	close(done)
  1059  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1060  }
  1061  
  1062  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
  1063  // one by one
  1064  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1065  	t.Parallel()
  1066  
  1067  	var (
  1068  		once   sync.Once
  1069  		cancel = make(chan struct{})
  1070  		term   = func() {
  1071  			once.Do(func() {
  1072  				close(cancel)
  1073  			})
  1074  		}
  1075  	)
  1076  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1077  
  1078  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1079  		source := newTestPeer(name, t, term)
  1080  		source.accountTrie = sourceAccountTrie
  1081  		source.accountValues = elems
  1082  		source.codeRequestHandler = codeFn
  1083  		return source
  1084  	}
  1085  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1086  	// so it shouldn't be more than that
  1087  	var counter int
  1088  	syncer := setupSyncer(
  1089  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1090  			counter++
  1091  			return cappedCodeRequestHandler(t, id, hashes, max)
  1092  		}),
  1093  	)
  1094  	done := checkStall(t, term)
  1095  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1096  		t.Fatalf("sync failed: %v", err)
  1097  	}
  1098  	close(done)
  1099  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1100  	// deduplication is per request batch. If it were a perfect global dedup,
  1101  	// we would expect only 8 requests. If there were no dedup, there would be
  1102  	// 3k requests.
  1103  	// We expect somewhere below 100 requests for these 8 unique hashes.
  1104  	if threshold := 100; counter > threshold {
  1105  		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
  1106  	}
  1107  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1108  }
  1109  
  1110  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1111  // storage trie has a few boundary elements.
  1112  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1113  	t.Parallel()
  1114  
  1115  	var (
  1116  		once   sync.Once
  1117  		cancel = make(chan struct{})
  1118  		term   = func() {
  1119  			once.Do(func() {
  1120  				close(cancel)
  1121  			})
  1122  		}
  1123  	)
  1124  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
  1125  
  1126  	mkSource := func(name string) *testPeer {
  1127  		source := newTestPeer(name, t, term)
  1128  		source.accountTrie = sourceAccountTrie
  1129  		source.accountValues = elems
  1130  		source.storageTries = storageTries
  1131  		source.storageValues = storageElems
  1132  		return source
  1133  	}
  1134  	syncer := setupSyncer(
  1135  		mkSource("peer-a"),
  1136  		mkSource("peer-b"),
  1137  	)
  1138  	done := checkStall(t, term)
  1139  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1140  		t.Fatalf("sync failed: %v", err)
  1141  	}
  1142  	close(done)
  1143  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1144  }
  1145  
  1146  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1147  // consistently returning very small results
  1148  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1149  	t.Parallel()
  1150  
  1151  	var (
  1152  		once   sync.Once
  1153  		cancel = make(chan struct{})
  1154  		term   = func() {
  1155  			once.Do(func() {
  1156  				close(cancel)
  1157  			})
  1158  		}
  1159  	)
  1160  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
  1161  
  1162  	mkSource := func(name string, slow bool) *testPeer {
  1163  		source := newTestPeer(name, t, term)
  1164  		source.accountTrie = sourceAccountTrie
  1165  		source.accountValues = elems
  1166  		source.storageTries = storageTries
  1167  		source.storageValues = storageElems
  1168  
  1169  		if slow {
  1170  			source.storageRequestHandler = starvingStorageRequestHandler
  1171  		}
  1172  		return source
  1173  	}
  1174  
  1175  	syncer := setupSyncer(
  1176  		mkSource("nice-a", false),
  1177  		mkSource("slow", true),
  1178  	)
  1179  	done := checkStall(t, term)
  1180  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1181  		t.Fatalf("sync failed: %v", err)
  1182  	}
  1183  	close(done)
  1184  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1185  }
  1186  
  1187  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1188  // sometimes sending bad proofs
  1189  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1190  	t.Parallel()
  1191  
  1192  	var (
  1193  		once   sync.Once
  1194  		cancel = make(chan struct{})
  1195  		term   = func() {
  1196  			once.Do(func() {
  1197  				close(cancel)
  1198  			})
  1199  		}
  1200  	)
  1201  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1202  
  1203  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1204  		source := newTestPeer(name, t, term)
  1205  		source.accountTrie = sourceAccountTrie
  1206  		source.accountValues = elems
  1207  		source.storageTries = storageTries
  1208  		source.storageValues = storageElems
  1209  		source.storageRequestHandler = handler
  1210  		return source
  1211  	}
  1212  
  1213  	syncer := setupSyncer(
  1214  		mkSource("nice-a", defaultStorageRequestHandler),
  1215  		mkSource("nice-b", defaultStorageRequestHandler),
  1216  		mkSource("nice-c", defaultStorageRequestHandler),
  1217  		mkSource("corrupt", corruptStorageRequestHandler),
  1218  	)
  1219  	done := checkStall(t, term)
  1220  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1221  		t.Fatalf("sync failed: %v", err)
  1222  	}
  1223  	close(done)
  1224  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1225  }
  1226  
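        // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where one
        // peer delivers its storage ranges without any Merkle proofs.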
  1227  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1228  	t.Parallel()
  1229  
  1230  	var (
  1231  		once   sync.Once
  1232  		cancel = make(chan struct{})
  1233  		term   = func() {
  1234  			once.Do(func() {
  1235  				close(cancel)
  1236  			})
  1237  		}
  1238  	)
  1239  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1240  
  1241  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1242  		source := newTestPeer(name, t, term)
  1243  		source.accountTrie = sourceAccountTrie
  1244  		source.accountValues = elems
  1245  		source.storageTries = storageTries
  1246  		source.storageValues = storageElems
  1247  		source.storageRequestHandler = handler
  1248  		return source
  1249  	}
  1250  	syncer := setupSyncer(
  1251  		mkSource("nice-a", defaultStorageRequestHandler),
  1252  		mkSource("nice-b", defaultStorageRequestHandler),
  1253  		mkSource("nice-c", defaultStorageRequestHandler),
  1254  		mkSource("corrupt", noProofStorageRequestHandler),
  1255  	)
  1256  	done := checkStall(t, term)
  1257  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1258  		t.Fatalf("sync failed: %v", err)
  1259  	}
  1260  	close(done)
  1261  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1262  }
  1263  
  1264  // TestSyncWithStorageMisbehavingProve tests a basic sync using accounts + storage + code, against
  1265  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1266  // an error, where the recipient erroneously clipped the boundary nodes, but
  1267  // did not mark the account for healing.
  1268  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1269  	t.Parallel()
  1270  	var (
  1271  		once   sync.Once
  1272  		cancel = make(chan struct{})
  1273  		term   = func() {
  1274  			once.Do(func() {
  1275  				close(cancel)
  1276  			})
  1277  		}
  1278  	)
  1279  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
  1280  
  1281  	mkSource := func(name string) *testPeer {
  1282  		source := newTestPeer(name, t, term)
  1283  		source.accountTrie = sourceAccountTrie
  1284  		source.accountValues = elems
  1285  		source.storageTries = storageTries
  1286  		source.storageValues = storageElems
  1287  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1288  		return source
  1289  	}
  1290  	syncer := setupSyncer(mkSource("sourceA"))
  1291  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1292  		t.Fatalf("sync failed: %v", err)
  1293  	}
  1294  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1295  }
  1296  
  1297  type kv struct {
  1298  	k, v []byte
  1299  }
  1300  
  1301  // Some helpers for sorting
  1302  type entrySlice []*kv
  1303  
  1304  func (p entrySlice) Len() int           { return len(p) }
  1305  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
  1306  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
  1307  
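        // key32 returns a 32-byte key holding the little-endian encoding of i in its first eight bytes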
  1308  func key32(i uint64) []byte {
  1309  	key := make([]byte, 32)
  1310  	binary.LittleEndian.PutUint64(key, i)
  1311  	return key
  1312  }
  1313  
  1314  var (
  1315  	codehashes = []common.Hash{
  1316  		crypto.Keccak256Hash([]byte{0}),
  1317  		crypto.Keccak256Hash([]byte{1}),
  1318  		crypto.Keccak256Hash([]byte{2}),
  1319  		crypto.Keccak256Hash([]byte{3}),
  1320  		crypto.Keccak256Hash([]byte{4}),
  1321  		crypto.Keccak256Hash([]byte{5}),
  1322  		crypto.Keccak256Hash([]byte{6}),
  1323  		crypto.Keccak256Hash([]byte{7}),
  1324  	}
  1325  )
  1326  
  1327  // getCodeHash returns a pseudo-random code hash
  1328  func getCodeHash(i uint64) []byte {
  1329  	h := codehashes[int(i)%len(codehashes)]
  1330  	return common.CopyBytes(h[:])
  1331  }
  1332  
  1333  // getCodeByHash is a convenience function to look up the code from the code hash
  1334  func getCodeByHash(hash common.Hash) []byte {
  1335  	if hash == emptyCode {
  1336  		return nil
  1337  	}
  1338  	for i, h := range codehashes {
  1339  		if h == hash {
  1340  			return []byte{byte(i)}
  1341  		}
  1342  	}
  1343  	return nil
  1344  }
  1345  
  1346  // makeAccountTrieNoStorage spits out a trie, along with the sorted leaf entries
  1347  func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
  1348  	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
  1349  	accTrie, _ := trie.New(common.Hash{}, db)
  1350  	var entries entrySlice
  1351  	for i := uint64(1); i <= uint64(n); i++ {
  1352  		value, _ := rlp.EncodeToBytes(state.Account{
  1353  			Nonce:    i,
  1354  			Balance:  big.NewInt(int64(i)),
  1355  			Root:     emptyRoot,
  1356  			CodeHash: getCodeHash(i),
  1357  		})
  1358  		key := key32(i)
  1359  		elem := &kv{key, value}
  1360  		accTrie.Update(elem.k, elem.v)
  1361  		entries = append(entries, elem)
  1362  	}
  1363  	sort.Sort(entries)
  1364  	accTrie.Commit(nil)
  1365  	return accTrie, entries
  1366  }
  1367  
  1368  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1369  // accounts normally, this function will fill a few accounts which have
  1370  // boundary hashes.
  1371  func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
  1372  	var (
  1373  		entries    entrySlice
  1374  		boundaries []common.Hash
  1375  
  1376  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1377  		trie, _ = trie.New(common.Hash{}, db)
  1378  	)
  1379  	// Initialize boundaries
  1380  	var next common.Hash
  1381  	step := new(big.Int).Sub(
  1382  		new(big.Int).Div(
  1383  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1384  			big.NewInt(int64(accountConcurrency)),
  1385  		), common.Big1,
  1386  	)
  1387  	for i := 0; i < accountConcurrency; i++ {
  1388  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1389  		if i == accountConcurrency-1 {
  1390  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1391  		}
  1392  		boundaries = append(boundaries, last)
  1393  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1394  	}
  1395  	// Fill boundary accounts
  1396  	for i := 0; i < len(boundaries); i++ {
  1397  		value, _ := rlp.EncodeToBytes(state.Account{
  1398  			Nonce:    uint64(0),
  1399  			Balance:  big.NewInt(int64(i)),
  1400  			Root:     emptyRoot,
  1401  			CodeHash: getCodeHash(uint64(i)),
  1402  		})
  1403  		elem := &kv{boundaries[i].Bytes(), value}
  1404  		trie.Update(elem.k, elem.v)
  1405  		entries = append(entries, elem)
  1406  	}
  1407  	// Fill other accounts if required
  1408  	for i := uint64(1); i <= uint64(n); i++ {
  1409  		value, _ := rlp.EncodeToBytes(state.Account{
  1410  			Nonce:    i,
  1411  			Balance:  big.NewInt(int64(i)),
  1412  			Root:     emptyRoot,
  1413  			CodeHash: getCodeHash(i),
  1414  		})
  1415  		elem := &kv{key32(i), value}
  1416  		trie.Update(elem.k, elem.v)
  1417  		entries = append(entries, elem)
  1418  	}
  1419  	sort.Sort(entries)
  1420  	trie.Commit(nil)
  1421  	return trie, entries
  1422  }
  1423  
  1424  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1425  // has a unique storage set.
  1426  func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1427  	var (
  1428  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1429  		accTrie, _     = trie.New(common.Hash{}, db)
  1430  		entries        entrySlice
  1431  		storageTries   = make(map[common.Hash]*trie.Trie)
  1432  		storageEntries = make(map[common.Hash]entrySlice)
  1433  	)
  1434  	// Create n accounts in the trie
  1435  	for i := uint64(1); i <= uint64(accounts); i++ {
  1436  		key := key32(i)
  1437  		codehash := emptyCode[:]
  1438  		if code {
  1439  			codehash = getCodeHash(i)
  1440  		}
  1441  		// Create a storage trie
  1442  		stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
  1443  		stRoot := stTrie.Hash()
  1444  		stTrie.Commit(nil)
  1445  		value, _ := rlp.EncodeToBytes(state.Account{
  1446  			Nonce:    i,
  1447  			Balance:  big.NewInt(int64(i)),
  1448  			Root:     stRoot,
  1449  			CodeHash: codehash,
  1450  		})
  1451  		elem := &kv{key, value}
  1452  		accTrie.Update(elem.k, elem.v)
  1453  		entries = append(entries, elem)
  1454  
  1455  		storageTries[common.BytesToHash(key)] = stTrie
  1456  		storageEntries[common.BytesToHash(key)] = stEntries
  1457  	}
  1458  	sort.Sort(entries)
  1459  
  1460  	accTrie.Commit(nil)
  1461  	return accTrie, entries, storageTries, storageEntries
  1462  }
  1463  
  1464  // makeAccountTrieWithStorage spits out a trie, along with the sorted leaf entries
  1465  func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1466  	var (
  1467  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1468  		accTrie, _     = trie.New(common.Hash{}, db)
  1469  		entries        entrySlice
  1470  		storageTries   = make(map[common.Hash]*trie.Trie)
  1471  		storageEntries = make(map[common.Hash]entrySlice)
  1472  	)
  1473  	// Make a storage trie which we reuse for the whole lot
  1474  	var (
  1475  		stTrie    *trie.Trie
  1476  		stEntries entrySlice
  1477  	)
  1478  	if boundary {
  1479  		stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
  1480  	} else {
  1481  		stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
  1482  	}
  1483  	stRoot := stTrie.Hash()
  1484  
  1485  	// Create n accounts in the trie
  1486  	for i := uint64(1); i <= uint64(accounts); i++ {
  1487  		key := key32(i)
  1488  		codehash := emptyCode[:]
  1489  		if code {
  1490  			codehash = getCodeHash(i)
  1491  		}
  1492  		value, _ := rlp.EncodeToBytes(state.Account{
  1493  			Nonce:    i,
  1494  			Balance:  big.NewInt(int64(i)),
  1495  			Root:     stRoot,
  1496  			CodeHash: codehash,
  1497  		})
  1498  		elem := &kv{key, value}
  1499  		accTrie.Update(elem.k, elem.v)
  1500  		entries = append(entries, elem)
  1501  		// We reuse the same storage trie for all accounts
  1502  		storageTries[common.BytesToHash(key)] = stTrie
  1503  		storageEntries[common.BytesToHash(key)] = stEntries
  1504  	}
  1505  	sort.Sort(entries)
  1506  	stTrie.Commit(nil)
  1507  	accTrie.Commit(nil)
  1508  	return accTrie, entries, storageTries, storageEntries
  1509  }
  1510  
  1511  // makeStorageTrieWithSeed fills a storage trie with n items, returning the
  1512  // committed trie and the sorted entries. The seed can be used to ensure that
  1513  // tries are unique.
  1514  func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
  1515  	trie, _ := trie.New(common.Hash{}, db)
  1516  	var entries entrySlice
  1517  	for i := uint64(1); i <= n; i++ {
  1518  		// store 'x + seed' at slot 'x'
  1519  		slotValue := key32(i + seed)
  1520  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1521  
  1522  		slotKey := key32(i)
  1523  		key := crypto.Keccak256Hash(slotKey[:])
  1524  
  1525  		elem := &kv{key[:], rlpSlotValue}
  1526  		trie.Update(elem.k, elem.v)
  1527  		entries = append(entries, elem)
  1528  	}
  1529  	sort.Sort(entries)
  1530  	trie.Commit(nil)
  1531  	return trie, entries
  1532  }
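
// For reference, the leaf format written above roughly mirrors how storage is
// laid out in the secure state trie: the slot key is keccak-hashed and the value
// is stored as the RLP encoding of its bytes with leading zeroes stripped (the
// test hashes the key itself because it builds a plain trie). The helper below
// is an illustrative sketch only; its name and signature are made up and nothing
// in the tests calls it.
func exampleStorageLeaf(slotKey, slotValue common.Hash) (k, v []byte) {
	hashedKey := crypto.Keccak256Hash(slotKey[:])                    // trie key = keccak256(slot key)
	enc, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) // trie value = RLP(trimmed value)
	return hashedKey[:], enc
}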
  1533  
  1534  // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
  1535  // storage slots normally, this function will fill a few slots whose keys
  1536  // are the range boundary hashes.
  1537  func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
  1538  	var (
  1539  		entries    entrySlice
  1540  		boundaries []common.Hash
  1541  		trie, _    = trie.New(common.Hash{}, db)
  1542  	)
  1543  	// Initialize boundaries
  1544  	var next common.Hash
  1545  	step := new(big.Int).Sub(
  1546  		new(big.Int).Div(
  1547  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1548  			big.NewInt(int64(accountConcurrency)),
  1549  		), common.Big1,
  1550  	)
  1551  	for i := 0; i < accountConcurrency; i++ {
  1552  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1553  		if i == accountConcurrency-1 {
  1554  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1555  		}
  1556  		boundaries = append(boundaries, last)
  1557  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1558  	}
  1559  	// Fill boundary slots
  1560  	for i := 0; i < len(boundaries); i++ {
  1561  		key := boundaries[i]
  1562  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1563  
  1564  		elem := &kv{key[:], val}
  1565  		trie.Update(elem.k, elem.v)
  1566  		entries = append(entries, elem)
  1567  	}
  1568  	// Fill other slots if required
  1569  	for i := uint64(1); i <= uint64(n); i++ {
  1570  		slotKey := key32(i)
  1571  		key := crypto.Keccak256Hash(slotKey[:])
  1572  
  1573  		slotValue := key32(i)
  1574  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1575  
  1576  		elem := &kv{key[:], rlpSlotValue}
  1577  		trie.Update(elem.k, elem.v)
  1578  		entries = append(entries, elem)
  1579  	}
  1580  	sort.Sort(entries)
  1581  	trie.Commit(nil)
  1582  	return trie, entries
  1583  }
  1584  
  1585  func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1586  	t.Helper()
  1587  	triedb := trie.NewDatabase(db)
  1588  	accTrie, err := trie.New(root, triedb)
  1589  	if err != nil {
  1590  		t.Fatal(err)
  1591  	}
  1592  	accounts, slots := 0, 0
  1593  	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
  1594  	for accIt.Next() {
  1595  		var acc struct {
  1596  			Nonce    uint64
  1597  			Balance  *big.Int
  1598  			Root     common.Hash
  1599  			CodeHash []byte
  1600  		}
  1601  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1602  			log.Crit("Invalid account encountered during trie verification", "err", err)
  1603  		}
  1604  		accounts++
  1605  		if acc.Root != emptyRoot {
  1606  			storeTrie, err := trie.NewSecure(acc.Root, triedb)
  1607  			if err != nil {
  1608  				t.Fatal(err)
  1609  			}
  1610  			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
  1611  			for storeIt.Next() {
  1612  				slots++
  1613  			}
  1614  			if err := storeIt.Err; err != nil {
  1615  				t.Fatal(err)
  1616  			}
  1617  		}
  1618  	}
  1619  	if err := accIt.Err; err != nil {
  1620  		t.Fatal(err)
  1621  	}
  1622  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1623  }
  1624  
  1625  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1626  // minimizing state healing.
  1627  func TestSyncAccountPerformance(t *testing.T) {
  1628  	// Set the account concurrency to 1. This _should_ result in the
  1629  	// range root becoming correct, so no healing should be needed.
  1630  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1631  	accountConcurrency = 1
  1632  
  1633  	var (
  1634  		once   sync.Once
  1635  		cancel = make(chan struct{})
  1636  		term   = func() {
  1637  			once.Do(func() {
  1638  				close(cancel)
  1639  			})
  1640  		}
  1641  	)
  1642  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
  1643  
  1644  	mkSource := func(name string) *testPeer {
  1645  		source := newTestPeer(name, t, term)
  1646  		source.accountTrie = sourceAccountTrie
  1647  		source.accountValues = elems
  1648  		return source
  1649  	}
  1650  	src := mkSource("source")
  1651  	syncer := setupSyncer(src)
  1652  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1653  		t.Fatalf("sync failed: %v", err)
  1654  	}
  1655  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1656  	// The trie root will always be requested, since it is added when the snap
  1657  	// sync cycle starts. When popping the queue, we do not look it up again.
  1658  	// Doing so would bring this number down to zero in this artificial testcase,
  1659  	// but only add extra IO for no reason in practice.
  1660  	if have, want := src.nTrienodeRequests, 1; have != want {
  1661  		fmt.Print(src.Stats())
  1662  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1663  	}
  1664  }
  1665  
  1666  func TestSlotEstimation(t *testing.T) {
  1667  	for i, tc := range []struct {
  1668  		last  common.Hash
  1669  		count int
  1670  		want  uint64
  1671  	}{
  1672  		{
  1673  			// Half the space
  1674  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1675  			100,
  1676  			100,
  1677  		},
  1678  		{
  1679  			// 1 / 16th
  1680  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1681  			100,
  1682  			1500,
  1683  		},
  1684  		{
  1685  			// Bit more than 1 / 16th
  1686  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1687  			100,
  1688  			1499,
  1689  		},
  1690  		{
  1691  			// Almost everything
  1692  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1693  			100,
  1694  			6,
  1695  		},
  1696  		{
  1697  			// Almost nothing -- should lead to error
  1698  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1699  			1,
  1700  			0,
  1701  		},
  1702  		{
  1703  			// Nothing -- should lead to error
  1704  			common.Hash{},
  1705  			100,
  1706  			0,
  1707  		},
  1708  	} {
  1709  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1710  		if want := tc.want; have != want {
  1711  			t.Errorf("test %d: have %d want %d", i, have, want)
  1712  		}
  1713  	}
  1714  }
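
// Illustrative sketch (an assumption, not the implementation under test): the
// expectations above follow a simple linear extrapolation. If `count` slots were
// needed to cover the keyspace up to `last`, the rest of the keyspace is assumed
// to be equally dense, e.g. 100 slots in the first 1/16th extrapolate to
// 15*100 = 1500 slots still remaining. The real estimateRemainingSlots also
// returns an error for degenerate inputs such as the last two table entries;
// that part is omitted here.
func exampleRemainingSlots(count int, last common.Hash) *big.Int {
	covered := new(big.Int).Add(last.Big(), common.Big1)       // number of keys <= last
	total := new(big.Int).Exp(common.Big2, common.Big256, nil) // size of the full keyspace
	left := new(big.Int).Sub(total, covered)                   // keys not yet covered
	// remaining ~= count * left / covered
	remaining := new(big.Int).Mul(big.NewInt(int64(count)), left)
	return remaining.Div(remaining, covered)
}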