github.com/fff-chain/go-fff@v0.0.0-20220726032732-1c84420b8a99/eth/protocols/snap/sync_test.go (about)

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sort"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/fff-chain/go-fff/common"
    31  	"github.com/fff-chain/go-fff/core/rawdb"
    32  	"github.com/fff-chain/go-fff/core/state"
    33  	"github.com/fff-chain/go-fff/crypto"
    34  	"github.com/fff-chain/go-fff/ethdb"
    35  	"github.com/fff-chain/go-fff/light"
    36  	"github.com/fff-chain/go-fff/log"
    37  	"github.com/fff-chain/go-fff/rlp"
    38  	"github.com/fff-chain/go-fff/trie"
    39  	"golang.org/x/crypto/sha3"
    40  )
    41  
    42  func TestHashing(t *testing.T) {
    43  	t.Parallel()
    44  
    45  	var bytecodes = make([][]byte, 10)
    46  	for i := 0; i < len(bytecodes); i++ {
    47  		buf := make([]byte, 100)
    48  		rand.Read(buf)
    49  		bytecodes[i] = buf
    50  	}
    51  	var want, got string
    52  	var old = func() {
    53  		hasher := sha3.NewLegacyKeccak256()
    54  		for i := 0; i < len(bytecodes); i++ {
    55  			hasher.Reset()
    56  			hasher.Write(bytecodes[i])
    57  			hash := hasher.Sum(nil)
    58  			got = fmt.Sprintf("%v\n%v", got, hash)
    59  		}
    60  	}
    61  	var new = func() {
    62  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    63  		var hash = make([]byte, 32)
    64  		for i := 0; i < len(bytecodes); i++ {
    65  			hasher.Reset()
    66  			hasher.Write(bytecodes[i])
    67  			hasher.Read(hash)
    68  			want = fmt.Sprintf("%v\n%v", want, hash)
    69  		}
    70  	}
    71  	old()
    72  	new()
    73  	if want != got {
    74  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    75  	}
    76  }
    77  
    78  func BenchmarkHashing(b *testing.B) {
    79  	var bytecodes = make([][]byte, 10000)
    80  	for i := 0; i < len(bytecodes); i++ {
    81  		buf := make([]byte, 100)
    82  		rand.Read(buf)
    83  		bytecodes[i] = buf
    84  	}
    85  	var old = func() {
    86  		hasher := sha3.NewLegacyKeccak256()
    87  		for i := 0; i < len(bytecodes); i++ {
    88  			hasher.Reset()
    89  			hasher.Write(bytecodes[i])
    90  			hasher.Sum(nil)
    91  		}
    92  	}
    93  	var new = func() {
    94  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    95  		var hash = make([]byte, 32)
    96  		for i := 0; i < len(bytecodes); i++ {
    97  			hasher.Reset()
    98  			hasher.Write(bytecodes[i])
    99  			hasher.Read(hash)
   100  		}
   101  	}
   102  	b.Run("old", func(b *testing.B) {
   103  		b.ReportAllocs()
   104  		for i := 0; i < b.N; i++ {
   105  			old()
   106  		}
   107  	})
   108  	b.Run("new", func(b *testing.B) {
   109  		b.ReportAllocs()
   110  		for i := 0; i < b.N; i++ {
   111  			new()
   112  		}
   113  	})
   114  }
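
         // The two variants above differ only in how the digest is extracted: Sum
         // allocates a fresh slice on every call, while the crypto.KeccakState cast
         // exposes sha3's Read method, which fills a caller-provided buffer. A
         // minimal sketch of the zero-allocation pattern, reusing only names that
         // are already imported in this file:
         //
         //	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
         //	digest := make([]byte, 32)
         //	hasher.Write(data)  // data: any input bytes
         //	hasher.Read(digest) // fills digest in place; no allocation, unlike Sum(nil)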
   115  
   116  type (
   117  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   118  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   119  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   120  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   121  )
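
         // These hook types let each test swap in per-request-type behaviour on a
         // peer: well-behaved, empty, silent or actively corrupt responses.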
   122  
   123  type testPeer struct {
   124  	id            string
   125  	test          *testing.T
   126  	remote        *Syncer
   127  	logger        log.Logger
   128  	accountTrie   *trie.Trie
   129  	accountValues entrySlice
   130  	storageTries  map[common.Hash]*trie.Trie
   131  	storageValues map[common.Hash]entrySlice
   132  
   133  	accountRequestHandler accountHandlerFunc
   134  	storageRequestHandler storageHandlerFunc
   135  	trieRequestHandler    trieHandlerFunc
   136  	codeRequestHandler    codeHandlerFunc
   137  	term                  func()
   138  
   139  	// counters
   140  	nAccountRequests  int
   141  	nStorageRequests  int
   142  	nBytecodeRequests int
   143  	nTrienodeRequests int
   144  }
   145  
   146  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   147  	peer := &testPeer{
   148  		id:                    id,
   149  		test:                  t,
   150  		logger:                log.New("id", id),
   151  		accountRequestHandler: defaultAccountRequestHandler,
   152  		trieRequestHandler:    defaultTrieRequestHandler,
   153  		storageRequestHandler: defaultStorageRequestHandler,
   154  		codeRequestHandler:    defaultCodeRequestHandler,
   155  		term:                  term,
   156  	}
   157  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   158  	//peer.logger.SetHandler(stderrHandler)
   159  	return peer
   160  }
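
         // Tests tailor a peer by swapping individual handlers after construction;
         // for example, a hypothetical misbehaving source could be wired up as:
         //
         //	peer := newTestPeer("corrupt", t, term)
         //	peer.codeRequestHandler = corruptCodeRequestHandler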
   161  
   162  func (t *testPeer) ID() string      { return t.id }
   163  func (t *testPeer) Log() log.Logger { return t.logger }
   164  
   165  func (t *testPeer) Stats() string {
   166  	return fmt.Sprintf(`Account requests: %d
   167  Storage requests: %d
   168  Bytecode requests: %d
   169  Trienode requests: %d
   170  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   171  }
   172  
   173  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   174  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   175  	t.nAccountRequests++
   176  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   177  	return nil
   178  }
   179  
   180  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   181  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   182  	t.nTrienodeRequests++
   183  	go t.trieRequestHandler(t, id, root, paths, bytes)
   184  	return nil
   185  }
   186  
   187  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   188  	t.nStorageRequests++
   189  	if len(accounts) == 1 && origin != nil {
   190  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   191  	} else {
   192  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   193  	}
   194  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   195  	return nil
   196  }
   197  
   198  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   199  	t.nBytecodeRequests++
   200  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   201  	go t.codeRequestHandler(t, id, hashes, bytes)
   202  	return nil
   203  }
   204  
    205  // defaultTrieRequestHandler is a well-behaved handler for trie healing requests
   206  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   207  	// Pass the response
   208  	var nodes [][]byte
   209  	for _, pathset := range paths {
   210  		switch len(pathset) {
   211  		case 1:
   212  			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
   213  			if err != nil {
   214  				t.logger.Info("Error handling req", "error", err)
   215  				break
   216  			}
   217  			nodes = append(nodes, blob)
   218  		default:
   219  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   220  			for _, path := range pathset[1:] {
   221  				blob, _, err := account.TryGetNode(path)
   222  				if err != nil {
   223  					t.logger.Info("Error handling req", "error", err)
   224  					break
   225  				}
   226  				nodes = append(nodes, blob)
   227  			}
   228  		}
   229  	}
   230  	t.remote.OnTrieNodes(t, requestId, nodes)
   231  	return nil
   232  }
   233  
    234  // defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
   235  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   236  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   237  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   238  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   239  		t.term()
   240  		return err
   241  	}
   242  	return nil
   243  }
   244  
   245  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   246  	var size uint64
   247  	if limit == (common.Hash{}) {
   248  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   249  	}
   250  	for _, entry := range t.accountValues {
   251  		if size > cap {
   252  			break
   253  		}
   254  		if bytes.Compare(origin[:], entry.k) <= 0 {
   255  			keys = append(keys, common.BytesToHash(entry.k))
   256  			vals = append(vals, entry.v)
   257  			size += uint64(32 + len(entry.v))
   258  		}
   259  		// If we've exceeded the request threshold, abort
   260  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   261  			break
   262  		}
   263  	}
   264  	// Unless we send the entire trie, we need to supply proofs
   265  	// Actually, we need to supply proofs either way! This seems to be an implementation
   266  	// quirk in go-ethereum
   267  	proof := light.NewNodeSet()
   268  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   269  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   270  	}
   271  	if len(keys) > 0 {
   272  		lastK := (keys[len(keys)-1])[:]
   273  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   274  			t.logger.Error("Could not prove last item", "error", err)
   275  		}
   276  	}
   277  	for _, blob := range proof.NodeList() {
   278  		proofs = append(proofs, blob)
   279  	}
   280  	return keys, vals, proofs
   281  }
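
         // The proof layout above mirrors what the snap protocol expects: prove the
         // (possibly non-existent) origin key and the last returned key, so that the
         // recipient can verify the range is complete against the state root. A
         // minimal sketch of that bracketing (accTrie and lastKey are illustrative):
         //
         //	proof := light.NewNodeSet()
         //	accTrie.Prove(origin[:], 0, proof) // left boundary
         //	accTrie.Prove(lastKey, 0, proof)   // right boundary
         //	// proof.NodeList() now holds every node needed to verify the range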
   282  
    283  // defaultStorageRequestHandler is a well-behaved storage request handler
   284  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   285  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   286  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   287  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   288  		t.term()
   289  	}
   290  	return nil
   291  }
   292  
   293  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   294  	var bytecodes [][]byte
   295  	for _, h := range hashes {
   296  		bytecodes = append(bytecodes, getCodeByHash(h))
   297  	}
   298  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   299  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   300  		t.term()
   301  	}
   302  	return nil
   303  }
   304  
   305  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   306  	var size uint64
   307  	for _, account := range accounts {
   308  		// The first account might start from a different origin and end sooner
   309  		var originHash common.Hash
   310  		if len(origin) > 0 {
   311  			originHash = common.BytesToHash(origin)
   312  		}
   313  		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   314  		if len(limit) > 0 {
   315  			limitHash = common.BytesToHash(limit)
   316  		}
   317  		var (
   318  			keys  []common.Hash
   319  			vals  [][]byte
   320  			abort bool
   321  		)
   322  		for _, entry := range t.storageValues[account] {
   323  			if size >= max {
   324  				abort = true
   325  				break
   326  			}
   327  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   328  				continue
   329  			}
   330  			keys = append(keys, common.BytesToHash(entry.k))
   331  			vals = append(vals, entry.v)
   332  			size += uint64(32 + len(entry.v))
   333  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   334  				break
   335  			}
   336  		}
   337  		hashes = append(hashes, keys)
   338  		slots = append(slots, vals)
   339  
    340  		// Generate the Merkle proofs for the first and last storage slot, but
    341  		// only if the response was capped. If the entire storage trie is included
    342  		// in the response, there is no need for any proofs.
   343  		if originHash != (common.Hash{}) || abort {
   344  			// If we're aborting, we need to prove the first and last item
   345  			// This terminates the response (and thus the loop)
   346  			proof := light.NewNodeSet()
   347  			stTrie := t.storageTries[account]
   348  
   349  			// Here's a potential gotcha: when constructing the proof, we cannot
   350  			// use the 'origin' slice directly, but must use the full 32-byte
   351  			// hash form.
   352  			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
   353  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   354  			}
   355  			if len(keys) > 0 {
   356  				lastK := (keys[len(keys)-1])[:]
   357  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   358  					t.logger.Error("Could not prove last item", "error", err)
   359  				}
   360  			}
   361  			for _, blob := range proof.NodeList() {
   362  				proofs = append(proofs, blob)
   363  			}
   364  			break
   365  		}
   366  	}
   367  	return hashes, slots, proofs
   368  }
   369  
    370  // createStorageRequestResponseAlwaysProve tests a corner case, where it always
    371  // supplies the proof for the last account, even if it is 'complete'.
   372  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   373  	var size uint64
   374  	max = max * 3 / 4
   375  
   376  	var origin common.Hash
   377  	if len(bOrigin) > 0 {
   378  		origin = common.BytesToHash(bOrigin)
   379  	}
   380  	var exit bool
   381  	for i, account := range accounts {
   382  		var keys []common.Hash
   383  		var vals [][]byte
   384  		for _, entry := range t.storageValues[account] {
   385  			if bytes.Compare(entry.k, origin[:]) < 0 {
   386  				exit = true
   387  			}
   388  			keys = append(keys, common.BytesToHash(entry.k))
   389  			vals = append(vals, entry.v)
   390  			size += uint64(32 + len(entry.v))
   391  			if size > max {
   392  				exit = true
   393  			}
   394  		}
   395  		if i == len(accounts)-1 {
   396  			exit = true
   397  		}
   398  		hashes = append(hashes, keys)
   399  		slots = append(slots, vals)
   400  
   401  		if exit {
   402  			// If we're aborting, we need to prove the first and last item
   403  			// This terminates the response (and thus the loop)
   404  			proof := light.NewNodeSet()
   405  			stTrie := t.storageTries[account]
   406  
   407  			// Here's a potential gotcha: when constructing the proof, we cannot
   408  			// use the 'origin' slice directly, but must use the full 32-byte
   409  			// hash form.
   410  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   411  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   412  					"error", err)
   413  			}
   414  			if len(keys) > 0 {
   415  				lastK := (keys[len(keys)-1])[:]
   416  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   417  					t.logger.Error("Could not prove last item", "error", err)
   418  				}
   419  			}
   420  			for _, blob := range proof.NodeList() {
   421  				proofs = append(proofs, blob)
   422  			}
   423  			break
   424  		}
   425  	}
   426  	return hashes, slots, proofs
   427  }
   428  
    429  // emptyRequestAccountRangeFn answers AccountRangeRequests with an empty response
   430  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   431  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   432  	return nil
   433  }
   434  
   435  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   436  	return nil
   437  }
   438  
   439  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   440  	t.remote.OnTrieNodes(t, requestId, nil)
   441  	return nil
   442  }
   443  
   444  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   445  	return nil
   446  }
   447  
   448  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   449  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   450  	return nil
   451  }
   452  
   453  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   454  	return nil
   455  }
   456  
   457  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   458  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   459  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   460  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   461  		t.term()
   462  	}
   463  	return nil
   464  }
   465  
   466  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   467  //	var bytecodes [][]byte
   468  //	t.remote.OnByteCodes(t, id, bytecodes)
   469  //	return nil
   470  //}
   471  
   472  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   473  	var bytecodes [][]byte
   474  	for _, h := range hashes {
   475  		// Send back the hashes
   476  		bytecodes = append(bytecodes, h[:])
   477  	}
   478  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   479  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   480  		// Mimic the real-life handler, which drops a peer on errors
   481  		t.remote.Unregister(t.id)
   482  	}
   483  	return nil
   484  }
   485  
   486  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   487  	var bytecodes [][]byte
   488  	for _, h := range hashes[:1] {
   489  		bytecodes = append(bytecodes, getCodeByHash(h))
   490  	}
   491  	// Missing bytecode can be retrieved again, no error expected
   492  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   493  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   494  		t.term()
   495  	}
   496  	return nil
   497  }
   498  
    499  // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to be very small
   500  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   501  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   502  }
   503  
   504  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   505  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   506  }
   507  
   508  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   509  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   510  //}
   511  
   512  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   513  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   514  	if len(proofs) > 0 {
   515  		proofs = proofs[1:]
   516  	}
   517  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   518  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   519  		// Mimic the real-life handler, which drops a peer on errors
   520  		t.remote.Unregister(t.id)
   521  	}
   522  	return nil
   523  }
   524  
   525  // corruptStorageRequestHandler doesn't provide good proofs
   526  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   527  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   528  	if len(proofs) > 0 {
   529  		proofs = proofs[1:]
   530  	}
   531  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   532  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   533  		// Mimic the real-life handler, which drops a peer on errors
   534  		t.remote.Unregister(t.id)
   535  	}
   536  	return nil
   537  }
   538  
   539  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   540  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   541  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   542  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   543  		// Mimic the real-life handler, which drops a peer on errors
   544  		t.remote.Unregister(t.id)
   545  	}
   546  	return nil
   547  }
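
         // Taken together, the handlers above model the failure modes a syncer must
         // tolerate: empty responses, silence (timeouts), corrupted proofs and
         // missing proofs. In each case the expected outcome is a delivery error
         // followed by dropping the peer, never a panic or a stall.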
   548  
   549  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   550  // also ship the entire trie inside the proof. If the attack is successful,
   551  // the remote side does not do any follow-up requests
   552  func TestSyncBloatedProof(t *testing.T) {
   553  	t.Parallel()
   554  
   555  	var (
   556  		once   sync.Once
   557  		cancel = make(chan struct{})
   558  		term   = func() {
   559  			once.Do(func() {
   560  				close(cancel)
   561  			})
   562  		}
   563  	)
   564  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   565  	source := newTestPeer("source", t, term)
   566  	source.accountTrie = sourceAccountTrie
   567  	source.accountValues = elems
   568  
   569  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   570  		var (
   571  			proofs [][]byte
   572  			keys   []common.Hash
   573  			vals   [][]byte
   574  		)
   575  		// The values
   576  		for _, entry := range t.accountValues {
   577  			if bytes.Compare(entry.k, origin[:]) < 0 {
   578  				continue
   579  			}
   580  			if bytes.Compare(entry.k, limit[:]) > 0 {
   581  				continue
   582  			}
   583  			keys = append(keys, common.BytesToHash(entry.k))
   584  			vals = append(vals, entry.v)
   585  		}
   586  		// The proofs
   587  		proof := light.NewNodeSet()
   588  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   589  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   590  		}
   591  		// The bloat: add proof of every single element
   592  		for _, entry := range t.accountValues {
   593  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   594  				t.logger.Error("Could not prove item", "error", err)
   595  			}
   596  		}
   597  		// And remove one item from the elements
   598  		if len(keys) > 2 {
   599  			keys = append(keys[:1], keys[2:]...)
   600  			vals = append(vals[:1], vals[2:]...)
   601  		}
   602  		for _, blob := range proof.NodeList() {
   603  			proofs = append(proofs, blob)
   604  		}
   605  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   606  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   607  			t.term()
   608  			// This is actually correct, signal to exit the test successfully
   609  		}
   610  		return nil
   611  	}
   612  	syncer := setupSyncer(source)
   613  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   614  		t.Fatal("No error returned from incomplete/cancelled sync")
   615  	}
   616  }
   617  
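         // setupSyncer wires the given test peers to a fresh Syncer backed by an
         // in-memory database: every peer is registered with the syncer and handed
         // a back-reference through which its handlers deliver responses.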
   618  func setupSyncer(peers ...*testPeer) *Syncer {
   619  	stateDb := rawdb.NewMemoryDatabase()
   620  	syncer := NewSyncer(stateDb)
   621  	for _, peer := range peers {
   622  		syncer.Register(peer)
   623  		peer.remote = syncer
   624  	}
   625  	return syncer
   626  }
   627  
   628  // TestSync tests a basic sync with one peer
   629  func TestSync(t *testing.T) {
   630  	t.Parallel()
   631  
   632  	var (
   633  		once   sync.Once
   634  		cancel = make(chan struct{})
   635  		term   = func() {
   636  			once.Do(func() {
   637  				close(cancel)
   638  			})
   639  		}
   640  	)
   641  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   642  
   643  	mkSource := func(name string) *testPeer {
   644  		source := newTestPeer(name, t, term)
   645  		source.accountTrie = sourceAccountTrie
   646  		source.accountValues = elems
   647  		return source
   648  	}
   649  	syncer := setupSyncer(mkSource("source"))
   650  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   651  		t.Fatalf("sync failed: %v", err)
   652  	}
   653  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   654  }
   655  
   656  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   657  // panic within the prover
   658  func TestSyncTinyTriePanic(t *testing.T) {
   659  	t.Parallel()
   660  
   661  	var (
   662  		once   sync.Once
   663  		cancel = make(chan struct{})
   664  		term   = func() {
   665  			once.Do(func() {
   666  				close(cancel)
   667  			})
   668  		}
   669  	)
   670  	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   671  
   672  	mkSource := func(name string) *testPeer {
   673  		source := newTestPeer(name, t, term)
   674  		source.accountTrie = sourceAccountTrie
   675  		source.accountValues = elems
   676  		return source
   677  	}
   678  	syncer := setupSyncer(mkSource("source"))
   679  	done := checkStall(t, term)
   680  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   681  		t.Fatalf("sync failed: %v", err)
   682  	}
   683  	close(done)
   684  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   685  }
   686  
   687  // TestMultiSync tests a basic sync with multiple peers
   688  func TestMultiSync(t *testing.T) {
   689  	t.Parallel()
   690  
   691  	var (
   692  		once   sync.Once
   693  		cancel = make(chan struct{})
   694  		term   = func() {
   695  			once.Do(func() {
   696  				close(cancel)
   697  			})
   698  		}
   699  	)
   700  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   701  
   702  	mkSource := func(name string) *testPeer {
   703  		source := newTestPeer(name, t, term)
   704  		source.accountTrie = sourceAccountTrie
   705  		source.accountValues = elems
   706  		return source
   707  	}
   708  	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
   709  	done := checkStall(t, term)
   710  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   711  		t.Fatalf("sync failed: %v", err)
   712  	}
   713  	close(done)
   714  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   715  }
   716  
    717  // TestSyncWithStorage tests basic sync using accounts + storage + code
   718  func TestSyncWithStorage(t *testing.T) {
   719  	t.Parallel()
   720  
   721  	var (
   722  		once   sync.Once
   723  		cancel = make(chan struct{})
   724  		term   = func() {
   725  			once.Do(func() {
   726  				close(cancel)
   727  			})
   728  		}
   729  	)
   730  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
   731  
   732  	mkSource := func(name string) *testPeer {
   733  		source := newTestPeer(name, t, term)
   734  		source.accountTrie = sourceAccountTrie
   735  		source.accountValues = elems
   736  		source.storageTries = storageTries
   737  		source.storageValues = storageElems
   738  		return source
   739  	}
   740  	syncer := setupSyncer(mkSource("sourceA"))
   741  	done := checkStall(t, term)
   742  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   743  		t.Fatalf("sync failed: %v", err)
   744  	}
   745  	close(done)
   746  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   747  }
   748  
    749  // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
   750  func TestMultiSyncManyUseless(t *testing.T) {
   751  	t.Parallel()
   752  
   753  	var (
   754  		once   sync.Once
   755  		cancel = make(chan struct{})
   756  		term   = func() {
   757  			once.Do(func() {
   758  				close(cancel)
   759  			})
   760  		}
   761  	)
   762  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   763  
   764  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   765  		source := newTestPeer(name, t, term)
   766  		source.accountTrie = sourceAccountTrie
   767  		source.accountValues = elems
   768  		source.storageTries = storageTries
   769  		source.storageValues = storageElems
   770  
   771  		if !noAccount {
   772  			source.accountRequestHandler = emptyRequestAccountRangeFn
   773  		}
   774  		if !noStorage {
   775  			source.storageRequestHandler = emptyStorageRequestHandler
   776  		}
   777  		if !noTrieNode {
   778  			source.trieRequestHandler = emptyTrieRequestHandler
   779  		}
   780  		return source
   781  	}
   782  
   783  	syncer := setupSyncer(
   784  		mkSource("full", true, true, true),
   785  		mkSource("noAccounts", false, true, true),
   786  		mkSource("noStorage", true, false, true),
   787  		mkSource("noTrie", true, true, false),
   788  	)
   789  	done := checkStall(t, term)
   790  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   791  		t.Fatalf("sync failed: %v", err)
   792  	}
   793  	close(done)
   794  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   795  }
   796  
    797  // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
   798  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   799  	// We're setting the timeout to very low, to increase the chance of the timeout
   800  	// being triggered. This was previously a cause of panic, when a response
   801  	// arrived simultaneously as a timeout was triggered.
   802  	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
   803  	requestTimeout = time.Millisecond
   804  
   805  	var (
   806  		once   sync.Once
   807  		cancel = make(chan struct{})
   808  		term   = func() {
   809  			once.Do(func() {
   810  				close(cancel)
   811  			})
   812  		}
   813  	)
   814  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   815  
   816  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   817  		source := newTestPeer(name, t, term)
   818  		source.accountTrie = sourceAccountTrie
   819  		source.accountValues = elems
   820  		source.storageTries = storageTries
   821  		source.storageValues = storageElems
   822  
   823  		if !noAccount {
   824  			source.accountRequestHandler = emptyRequestAccountRangeFn
   825  		}
   826  		if !noStorage {
   827  			source.storageRequestHandler = emptyStorageRequestHandler
   828  		}
   829  		if !noTrieNode {
   830  			source.trieRequestHandler = emptyTrieRequestHandler
   831  		}
   832  		return source
   833  	}
   834  
   835  	syncer := setupSyncer(
   836  		mkSource("full", true, true, true),
   837  		mkSource("noAccounts", false, true, true),
   838  		mkSource("noStorage", true, false, true),
   839  		mkSource("noTrie", true, true, false),
   840  	)
   841  	done := checkStall(t, term)
   842  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   843  		t.Fatalf("sync failed: %v", err)
   844  	}
   845  	close(done)
   846  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   847  }
   848  
    849  // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
   850  func TestMultiSyncManyUnresponsive(t *testing.T) {
   851  	// We're setting the timeout to very low, to make the test run a bit faster
   852  	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
   853  	requestTimeout = time.Millisecond
   854  
   855  	var (
   856  		once   sync.Once
   857  		cancel = make(chan struct{})
   858  		term   = func() {
   859  			once.Do(func() {
   860  				close(cancel)
   861  			})
   862  		}
   863  	)
   864  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   865  
   866  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   867  		source := newTestPeer(name, t, term)
   868  		source.accountTrie = sourceAccountTrie
   869  		source.accountValues = elems
   870  		source.storageTries = storageTries
   871  		source.storageValues = storageElems
   872  
   873  		if !noAccount {
   874  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   875  		}
   876  		if !noStorage {
   877  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   878  		}
   879  		if !noTrieNode {
   880  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   881  		}
   882  		return source
   883  	}
   884  
   885  	syncer := setupSyncer(
   886  		mkSource("full", true, true, true),
   887  		mkSource("noAccounts", false, true, true),
   888  		mkSource("noStorage", true, false, true),
   889  		mkSource("noTrie", true, true, false),
   890  	)
   891  	done := checkStall(t, term)
   892  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   893  		t.Fatalf("sync failed: %v", err)
   894  	}
   895  	close(done)
   896  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   897  }
   898  
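         // checkStall arms a watchdog which aborts the sync via term() if the test
         // has not finished within a minute; callers close the returned channel on
         // success to disarm it:
         //
         //	done := checkStall(t, term)
         //	if err := syncer.Sync(root, cancel); err != nil { t.Fatal(err) }
         //	close(done)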
   899  func checkStall(t *testing.T, term func()) chan struct{} {
   900  	testDone := make(chan struct{})
   901  	go func() {
   902  		select {
   903  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   904  			t.Log("Sync stalled")
   905  			term()
   906  		case <-testDone:
   907  			return
   908  		}
   909  	}()
   910  	return testDone
   911  }
   912  
   913  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   914  // account trie has a few boundary elements.
   915  func TestSyncBoundaryAccountTrie(t *testing.T) {
   916  	t.Parallel()
   917  
   918  	var (
   919  		once   sync.Once
   920  		cancel = make(chan struct{})
   921  		term   = func() {
   922  			once.Do(func() {
   923  				close(cancel)
   924  			})
   925  		}
   926  	)
   927  	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
   928  
   929  	mkSource := func(name string) *testPeer {
   930  		source := newTestPeer(name, t, term)
   931  		source.accountTrie = sourceAccountTrie
   932  		source.accountValues = elems
   933  		return source
   934  	}
   935  	syncer := setupSyncer(
   936  		mkSource("peer-a"),
   937  		mkSource("peer-b"),
   938  	)
   939  	done := checkStall(t, term)
   940  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   941  		t.Fatalf("sync failed: %v", err)
   942  	}
   943  	close(done)
   944  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   945  }
   946  
   947  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   948  // consistently returning very small results
   949  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   950  	t.Parallel()
   951  
   952  	var (
   953  		once   sync.Once
   954  		cancel = make(chan struct{})
   955  		term   = func() {
   956  			once.Do(func() {
   957  				close(cancel)
   958  			})
   959  		}
   960  	)
   961  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   962  
   963  	mkSource := func(name string, slow bool) *testPeer {
   964  		source := newTestPeer(name, t, term)
   965  		source.accountTrie = sourceAccountTrie
   966  		source.accountValues = elems
   967  
   968  		if slow {
   969  			source.accountRequestHandler = starvingAccountRequestHandler
   970  		}
   971  		return source
   972  	}
   973  
   974  	syncer := setupSyncer(
   975  		mkSource("nice-a", false),
   976  		mkSource("nice-b", false),
   977  		mkSource("nice-c", false),
   978  		mkSource("capped", true),
   979  	)
   980  	done := checkStall(t, term)
   981  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   982  		t.Fatalf("sync failed: %v", err)
   983  	}
   984  	close(done)
   985  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   986  }
   987  
   988  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
   989  // code requests properly.
   990  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
   991  	t.Parallel()
   992  
   993  	var (
   994  		once   sync.Once
   995  		cancel = make(chan struct{})
   996  		term   = func() {
   997  			once.Do(func() {
   998  				close(cancel)
   999  			})
  1000  		}
  1001  	)
  1002  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1003  
  1004  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1005  		source := newTestPeer(name, t, term)
  1006  		source.accountTrie = sourceAccountTrie
  1007  		source.accountValues = elems
  1008  		source.codeRequestHandler = codeFn
  1009  		return source
  1010  	}
   1011  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1012  	// chance that the full set of codes requested is sent only to the
   1013  	// non-corrupt peer, which delivers everything in one go, and makes the
   1014  	// test moot
  1015  	syncer := setupSyncer(
  1016  		mkSource("capped", cappedCodeRequestHandler),
  1017  		mkSource("corrupt", corruptCodeRequestHandler),
  1018  	)
  1019  	done := checkStall(t, term)
  1020  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1021  		t.Fatalf("sync failed: %v", err)
  1022  	}
  1023  	close(done)
  1024  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1025  }
  1026  
  1027  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1028  	t.Parallel()
  1029  
  1030  	var (
  1031  		once   sync.Once
  1032  		cancel = make(chan struct{})
  1033  		term   = func() {
  1034  			once.Do(func() {
  1035  				close(cancel)
  1036  			})
  1037  		}
  1038  	)
  1039  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1040  
  1041  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1042  		source := newTestPeer(name, t, term)
  1043  		source.accountTrie = sourceAccountTrie
  1044  		source.accountValues = elems
  1045  		source.accountRequestHandler = accFn
  1046  		return source
  1047  	}
   1048  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1049  	// chance that the full set of accounts requested is sent only to the
   1050  	// non-corrupt peer, which delivers everything in one go, and makes the
   1051  	// test moot
  1052  	syncer := setupSyncer(
  1053  		mkSource("capped", defaultAccountRequestHandler),
  1054  		mkSource("corrupt", corruptAccountRequestHandler),
  1055  	)
  1056  	done := checkStall(t, term)
  1057  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1058  		t.Fatalf("sync failed: %v", err)
  1059  	}
  1060  	close(done)
  1061  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1062  }
  1063  
   1064  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
   1065  // one by one
  1066  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1067  	t.Parallel()
  1068  
  1069  	var (
  1070  		once   sync.Once
  1071  		cancel = make(chan struct{})
  1072  		term   = func() {
  1073  			once.Do(func() {
  1074  				close(cancel)
  1075  			})
  1076  		}
  1077  	)
  1078  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1079  
  1080  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1081  		source := newTestPeer(name, t, term)
  1082  		source.accountTrie = sourceAccountTrie
  1083  		source.accountValues = elems
  1084  		source.codeRequestHandler = codeFn
  1085  		return source
  1086  	}
  1087  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1088  	// so it shouldn't be more than that
  1089  	var counter int
  1090  	syncer := setupSyncer(
  1091  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1092  			counter++
  1093  			return cappedCodeRequestHandler(t, id, hashes, max)
  1094  		}),
  1095  	)
  1096  	done := checkStall(t, term)
  1097  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1098  		t.Fatalf("sync failed: %v", err)
  1099  	}
  1100  	close(done)
  1101  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1102  	// deduplication is per request batch. If it were a perfect global dedup,
  1103  	// we would expect only 8 requests. If there were no dedup, there would be
  1104  	// 3k requests.
  1105  	// We expect somewhere below 100 requests for these 8 unique hashes.
  1106  	if threshold := 100; counter > threshold {
  1107  		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
  1108  	}
  1109  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1110  }
  1111  
  1112  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1113  // storage trie has a few boundary elements.
  1114  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1115  	t.Parallel()
  1116  
  1117  	var (
  1118  		once   sync.Once
  1119  		cancel = make(chan struct{})
  1120  		term   = func() {
  1121  			once.Do(func() {
  1122  				close(cancel)
  1123  			})
  1124  		}
  1125  	)
  1126  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
  1127  
  1128  	mkSource := func(name string) *testPeer {
  1129  		source := newTestPeer(name, t, term)
  1130  		source.accountTrie = sourceAccountTrie
  1131  		source.accountValues = elems
  1132  		source.storageTries = storageTries
  1133  		source.storageValues = storageElems
  1134  		return source
  1135  	}
  1136  	syncer := setupSyncer(
  1137  		mkSource("peer-a"),
  1138  		mkSource("peer-b"),
  1139  	)
  1140  	done := checkStall(t, term)
  1141  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1142  		t.Fatalf("sync failed: %v", err)
  1143  	}
  1144  	close(done)
  1145  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1146  }
  1147  
  1148  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1149  // consistently returning very small results
  1150  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1151  	t.Parallel()
  1152  
  1153  	var (
  1154  		once   sync.Once
  1155  		cancel = make(chan struct{})
  1156  		term   = func() {
  1157  			once.Do(func() {
  1158  				close(cancel)
  1159  			})
  1160  		}
  1161  	)
  1162  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
  1163  
  1164  	mkSource := func(name string, slow bool) *testPeer {
  1165  		source := newTestPeer(name, t, term)
  1166  		source.accountTrie = sourceAccountTrie
  1167  		source.accountValues = elems
  1168  		source.storageTries = storageTries
  1169  		source.storageValues = storageElems
  1170  
  1171  		if slow {
  1172  			source.storageRequestHandler = starvingStorageRequestHandler
  1173  		}
  1174  		return source
  1175  	}
  1176  
  1177  	syncer := setupSyncer(
  1178  		mkSource("nice-a", false),
  1179  		mkSource("slow", true),
  1180  	)
  1181  	done := checkStall(t, term)
  1182  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1183  		t.Fatalf("sync failed: %v", err)
  1184  	}
  1185  	close(done)
  1186  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1187  }
  1188  
  1189  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1190  // sometimes sending bad proofs
  1191  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1192  	t.Parallel()
  1193  
  1194  	var (
  1195  		once   sync.Once
  1196  		cancel = make(chan struct{})
  1197  		term   = func() {
  1198  			once.Do(func() {
  1199  				close(cancel)
  1200  			})
  1201  		}
  1202  	)
  1203  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1204  
  1205  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1206  		source := newTestPeer(name, t, term)
  1207  		source.accountTrie = sourceAccountTrie
  1208  		source.accountValues = elems
  1209  		source.storageTries = storageTries
  1210  		source.storageValues = storageElems
  1211  		source.storageRequestHandler = handler
  1212  		return source
  1213  	}
  1214  
  1215  	syncer := setupSyncer(
  1216  		mkSource("nice-a", defaultStorageRequestHandler),
  1217  		mkSource("nice-b", defaultStorageRequestHandler),
  1218  		mkSource("nice-c", defaultStorageRequestHandler),
  1219  		mkSource("corrupt", corruptStorageRequestHandler),
  1220  	)
  1221  	done := checkStall(t, term)
  1222  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1223  		t.Fatalf("sync failed: %v", err)
  1224  	}
  1225  	close(done)
  1226  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1227  }
  1228  
  1229  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1230  	t.Parallel()
  1231  
  1232  	var (
  1233  		once   sync.Once
  1234  		cancel = make(chan struct{})
  1235  		term   = func() {
  1236  			once.Do(func() {
  1237  				close(cancel)
  1238  			})
  1239  		}
  1240  	)
  1241  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1242  
  1243  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1244  		source := newTestPeer(name, t, term)
  1245  		source.accountTrie = sourceAccountTrie
  1246  		source.accountValues = elems
  1247  		source.storageTries = storageTries
  1248  		source.storageValues = storageElems
  1249  		source.storageRequestHandler = handler
  1250  		return source
  1251  	}
  1252  	syncer := setupSyncer(
  1253  		mkSource("nice-a", defaultStorageRequestHandler),
  1254  		mkSource("nice-b", defaultStorageRequestHandler),
  1255  		mkSource("nice-c", defaultStorageRequestHandler),
  1256  		mkSource("corrupt", noProofStorageRequestHandler),
  1257  	)
  1258  	done := checkStall(t, term)
  1259  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1260  		t.Fatalf("sync failed: %v", err)
  1261  	}
  1262  	close(done)
  1263  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1264  }
  1265  
   1266  // TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code,
   1267  // against a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1268  // an error, where the recipient erroneously clipped the boundary nodes, but
  1269  // did not mark the account for healing.
  1270  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1271  	t.Parallel()
  1272  	var (
  1273  		once   sync.Once
  1274  		cancel = make(chan struct{})
  1275  		term   = func() {
  1276  			once.Do(func() {
  1277  				close(cancel)
  1278  			})
  1279  		}
  1280  	)
  1281  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
  1282  
  1283  	mkSource := func(name string) *testPeer {
  1284  		source := newTestPeer(name, t, term)
  1285  		source.accountTrie = sourceAccountTrie
  1286  		source.accountValues = elems
  1287  		source.storageTries = storageTries
  1288  		source.storageValues = storageElems
  1289  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1290  		return source
  1291  	}
  1292  	syncer := setupSyncer(mkSource("sourceA"))
  1293  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1294  		t.Fatalf("sync failed: %v", err)
  1295  	}
  1296  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1297  }
  1298  
  1299  type kv struct {
  1300  	k, v []byte
  1301  }
  1302  
  1303  // Some helpers for sorting
  1304  type entrySlice []*kv
  1305  
  1306  func (p entrySlice) Len() int           { return len(p) }
  1307  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
  1308  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
  1309  
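         // key32 derives a deterministic 32-byte key from a counter: key32(1), for
         // example, is 0x01 followed by 31 zero bytes, since the counter is written
         // little-endian into the first eight bytes.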
  1310  func key32(i uint64) []byte {
  1311  	key := make([]byte, 32)
  1312  	binary.LittleEndian.PutUint64(key, i)
  1313  	return key
  1314  }
  1315  
  1316  var (
  1317  	codehashes = []common.Hash{
  1318  		crypto.Keccak256Hash([]byte{0}),
  1319  		crypto.Keccak256Hash([]byte{1}),
  1320  		crypto.Keccak256Hash([]byte{2}),
  1321  		crypto.Keccak256Hash([]byte{3}),
  1322  		crypto.Keccak256Hash([]byte{4}),
  1323  		crypto.Keccak256Hash([]byte{5}),
  1324  		crypto.Keccak256Hash([]byte{6}),
  1325  		crypto.Keccak256Hash([]byte{7}),
  1326  	}
  1327  )
  1328  
  1329  // getCodeHash returns a pseudo-random code hash
  1330  func getCodeHash(i uint64) []byte {
  1331  	h := codehashes[int(i)%len(codehashes)]
  1332  	return common.CopyBytes(h[:])
  1333  }
  1334  
   1335  // getCodeByHash is a convenience function to look up the code for a given code hash
  1336  func getCodeByHash(hash common.Hash) []byte {
  1337  	if hash == emptyCode {
  1338  		return nil
  1339  	}
  1340  	for i, h := range codehashes {
  1341  		if h == hash {
  1342  			return []byte{byte(i)}
  1343  		}
  1344  	}
  1345  	return nil
  1346  }
  1347  
   1348  // makeAccountTrieNoStorage spits out a trie, along with the leaves
  1349  func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
  1350  	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
  1351  	accTrie, _ := trie.New(common.Hash{}, db)
  1352  	var entries entrySlice
  1353  	for i := uint64(1); i <= uint64(n); i++ {
  1354  		value, _ := rlp.EncodeToBytes(state.Account{
  1355  			Nonce:    i,
  1356  			Balance:  big.NewInt(int64(i)),
  1357  			Root:     emptyRoot,
  1358  			CodeHash: getCodeHash(i),
  1359  		})
  1360  		key := key32(i)
  1361  		elem := &kv{key, value}
  1362  		accTrie.Update(elem.k, elem.v)
  1363  		entries = append(entries, elem)
  1364  	}
  1365  	sort.Sort(entries)
  1366  	accTrie.Commit(nil)
  1367  	return accTrie, entries
  1368  }
  1369  
   1370  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
   1371  // accounts normally, this function will fill a few accounts which have
   1372  // boundary hashes.
  1373  func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
  1374  	var (
  1375  		entries    entrySlice
  1376  		boundaries []common.Hash
  1377  
  1378  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1379  		trie, _ = trie.New(common.Hash{}, db)
  1380  	)
  1381  	// Initialize boundaries
  1382  	var next common.Hash
  1383  	step := new(big.Int).Sub(
  1384  		new(big.Int).Div(
  1385  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1386  			big.NewInt(int64(accountConcurrency)),
  1387  		), common.Big1,
  1388  	)
  1389  	for i := 0; i < accountConcurrency; i++ {
  1390  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1391  		if i == accountConcurrency-1 {
  1392  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1393  		}
  1394  		boundaries = append(boundaries, last)
  1395  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1396  	}
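         	// The boundaries carve the 2^256 hash space into accountConcurrency
         	// equal chunks; boundary i is the last hash of chunk i, roughly
         	// (i+1)*2^256/accountConcurrency - 1. Accounts placed exactly on these
         	// hashes exercise the syncer's chunk-boundary handling.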
  1397  	// Fill boundary accounts
  1398  	for i := 0; i < len(boundaries); i++ {
  1399  		value, _ := rlp.EncodeToBytes(state.Account{
  1400  			Nonce:    uint64(0),
  1401  			Balance:  big.NewInt(int64(i)),
  1402  			Root:     emptyRoot,
  1403  			CodeHash: getCodeHash(uint64(i)),
  1404  		})
  1405  		elem := &kv{boundaries[i].Bytes(), value}
  1406  		trie.Update(elem.k, elem.v)
  1407  		entries = append(entries, elem)
  1408  	}
  1409  	// Fill other accounts if required
  1410  	for i := uint64(1); i <= uint64(n); i++ {
  1411  		value, _ := rlp.EncodeToBytes(state.Account{
  1412  			Nonce:    i,
  1413  			Balance:  big.NewInt(int64(i)),
  1414  			Root:     emptyRoot,
  1415  			CodeHash: getCodeHash(i),
  1416  		})
  1417  		elem := &kv{key32(i), value}
  1418  		trie.Update(elem.k, elem.v)
  1419  		entries = append(entries, elem)
  1420  	}
  1421  	sort.Sort(entries)
  1422  	trie.Commit(nil)
  1423  	return trie, entries
  1424  }
  1425  
   1426  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
   1427  // has a unique storage set.
  1428  func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1429  	var (
  1430  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1431  		accTrie, _     = trie.New(common.Hash{}, db)
  1432  		entries        entrySlice
  1433  		storageTries   = make(map[common.Hash]*trie.Trie)
  1434  		storageEntries = make(map[common.Hash]entrySlice)
  1435  	)
  1436  	// Create n accounts in the trie
  1437  	for i := uint64(1); i <= uint64(accounts); i++ {
  1438  		key := key32(i)
  1439  		codehash := emptyCode[:]
  1440  		if code {
  1441  			codehash = getCodeHash(i)
  1442  		}
  1443  		// Create a storage trie
  1444  		stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
  1445  		stRoot := stTrie.Hash()
  1446  		stTrie.Commit(nil)
  1447  		value, _ := rlp.EncodeToBytes(state.Account{
  1448  			Nonce:    i,
  1449  			Balance:  big.NewInt(int64(i)),
  1450  			Root:     stRoot,
  1451  			CodeHash: codehash,
  1452  		})
  1453  		elem := &kv{key, value}
  1454  		accTrie.Update(elem.k, elem.v)
  1455  		entries = append(entries, elem)
  1456  
  1457  		storageTries[common.BytesToHash(key)] = stTrie
  1458  		storageEntries[common.BytesToHash(key)] = stEntries
  1459  	}
  1460  	sort.Sort(entries)
  1461  
  1462  	accTrie.Commit(nil)
  1463  	return accTrie, entries, storageTries, storageEntries
  1464  }
  1465  
   1466  // makeAccountTrieWithStorage spits out a trie, along with the leaves
  1467  func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1468  	var (
  1469  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1470  		accTrie, _     = trie.New(common.Hash{}, db)
  1471  		entries        entrySlice
  1472  		storageTries   = make(map[common.Hash]*trie.Trie)
  1473  		storageEntries = make(map[common.Hash]entrySlice)
  1474  	)
  1475  	// Make a storage trie which we reuse for the whole lot
  1476  	var (
  1477  		stTrie    *trie.Trie
  1478  		stEntries entrySlice
  1479  	)
  1480  	if boundary {
  1481  		stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
  1482  	} else {
  1483  		stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
  1484  	}
  1485  	stRoot := stTrie.Hash()
  1486  
  1487  	// Create 'accounts' accounts in the trie
  1488  	for i := uint64(1); i <= uint64(accounts); i++ {
  1489  		key := key32(i)
  1490  		codehash := emptyCode[:]
  1491  		if code {
  1492  			codehash = getCodeHash(i)
  1493  		}
  1494  		value, _ := rlp.EncodeToBytes(state.Account{
  1495  			Nonce:    i,
  1496  			Balance:  big.NewInt(int64(i)),
  1497  			Root:     stRoot,
  1498  			CodeHash: codehash,
  1499  		})
  1500  		elem := &kv{key, value}
  1501  		accTrie.Update(elem.k, elem.v)
  1502  		entries = append(entries, elem)
  1503  		// We reuse the same storage trie for all accounts
  1504  		storageTries[common.BytesToHash(key)] = stTrie
  1505  		storageEntries[common.BytesToHash(key)] = stEntries
  1506  	}
  1507  	sort.Sort(entries)
  1508  	stTrie.Commit(nil)
  1509  	accTrie.Commit(nil)
  1510  	return accTrie, entries, storageTries, storageEntries
  1511  }
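        
        // A typical call sequence for these helpers, mirroring the sync tests in
        // this file, looks roughly like the sketch below. The storageTries and
        // storageValues fields on testPeer are assumed here from their use by the
        // storage sync tests:
        //
        //	accTrie, accs, stTries, stVals := makeAccountTrieWithStorage(3, 3000, true, false)
        //	source := newTestPeer("source", t, term)
        //	source.accountTrie, source.accountValues = accTrie, accs
        //	source.storageTries, source.storageValues = stTries, stVals
        //	syncer := setupSyncer(source)
        //	if err := syncer.Sync(accTrie.Hash(), cancel); err != nil {
        //		t.Fatalf("sync failed: %v", err)
        //	}
        //	verifyTrie(syncer.db, accTrie.Hash(), t)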
  1512  
  1513  // makeStorageTrieWithSeed fills a storage trie with n items, returning the
  1514  // committed trie and the sorted entries. The seed can be used to ensure that
  1515  // tries are unique.
  1516  func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
  1517  	trie, _ := trie.New(common.Hash{}, db)
  1518  	var entries entrySlice
  1519  	for i := uint64(1); i <= n; i++ {
  1520  		// Store value 'i+seed' at slot 'i'
  1521  		slotValue := key32(i + seed)
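        		// Slot values are stored RLP-encoded with leading zeroes trimmed,
        		// matching the canonical storage-slot encoding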
  1522  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1523  
  1524  		slotKey := key32(i)
  1525  		key := crypto.Keccak256Hash(slotKey[:])
  1526  
  1527  		elem := &kv{key[:], rlpSlotValue}
  1528  		trie.Update(elem.k, elem.v)
  1529  		entries = append(entries, elem)
  1530  	}
  1531  	sort.Sort(entries)
  1532  	trie.Commit(nil)
  1533  	return trie, entries
  1534  }
  1535  
  1536  // makeBoundaryStorageTrie constructs a storage trie. Instead of only filling
  1537  // storage slots normally, this function also fills a few slots that sit exactly
  1538  // on the boundary hashes of the chunked hash space.
  1539  func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
  1540  	var (
  1541  		entries    entrySlice
  1542  		boundaries []common.Hash
  1543  		trie, _    = trie.New(common.Hash{}, db)
  1544  	)
  1545  	// Initialize boundaries
  1546  	var next common.Hash
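        	// Split the 2^256 hash space into accountConcurrency equal chunks; step is
        	// one chunk's width minus one (the same scheme as for the boundary account
        	// trie above)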
  1547  	step := new(big.Int).Sub(
  1548  		new(big.Int).Div(
  1549  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1550  			big.NewInt(int64(accountConcurrency)),
  1551  		), common.Big1,
  1552  	)
  1553  	for i := 0; i < accountConcurrency; i++ {
  1554  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1555  		if i == accountConcurrency-1 {
  1556  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1557  		}
  1558  		boundaries = append(boundaries, last)
  1559  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1560  	}
  1561  	// Fill boundary slots
  1562  	for i := 0; i < len(boundaries); i++ {
  1563  		key := boundaries[i]
  1564  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1565  
  1566  		elem := &kv{key[:], val}
  1567  		trie.Update(elem.k, elem.v)
  1568  		entries = append(entries, elem)
  1569  	}
  1570  	// Fill other slots if required
  1571  	for i := uint64(1); i <= uint64(n); i++ {
  1572  		slotKey := key32(i)
  1573  		key := crypto.Keccak256Hash(slotKey[:])
  1574  
  1575  		slotValue := key32(i)
  1576  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1577  
  1578  		elem := &kv{key[:], rlpSlotValue}
  1579  		trie.Update(elem.k, elem.v)
  1580  		entries = append(entries, elem)
  1581  	}
  1582  	sort.Sort(entries)
  1583  	trie.Commit(nil)
  1584  	return trie, entries
  1585  }
  1586  
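        // verifyTrie iterates the full account trie rooted at root, descending into
        // the storage trie of every account that has one, and fails the test if any
        // referenced node is missing from db.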
  1587  func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1588  	t.Helper()
  1589  	triedb := trie.NewDatabase(db)
  1590  	accTrie, err := trie.New(root, triedb)
  1591  	if err != nil {
  1592  		t.Fatal(err)
  1593  	}
  1594  	accounts, slots := 0, 0
  1595  	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
  1596  	for accIt.Next() {
  1597  		var acc struct {
  1598  			Nonce    uint64
  1599  			Balance  *big.Int
  1600  			Root     common.Hash
  1601  			CodeHash []byte
  1602  		}
  1603  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1604  			log.Crit("Invalid account encountered during verification", "err", err)
  1605  		}
  1606  		accounts++
  1607  		if acc.Root != emptyRoot {
  1608  			storeTrie, err := trie.NewSecure(acc.Root, triedb)
  1609  			if err != nil {
  1610  				t.Fatal(err)
  1611  			}
  1612  			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
  1613  			for storeIt.Next() {
  1614  				slots++
  1615  			}
  1616  			if err := storeIt.Err; err != nil {
  1617  				t.Fatal(err)
  1618  			}
  1619  		}
  1620  	}
  1621  	if err := accIt.Err; err != nil {
  1622  		t.Fatal(err)
  1623  	}
  1624  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1625  }
  1626  
  1627  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1628  // minimizing state healing.
  1629  func TestSyncAccountPerformance(t *testing.T) {
  1630  	// Set the account concurrency to 1. This _should_ result in the range
  1631  	// root becoming correct, leaving no healing to be done.
  1632  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1633  	accountConcurrency = 1
  1634  
  1635  	var (
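        		// cancel aborts an in-flight sync when closed; term is handed to the
        		// test peers so a peer-side failure can tear the sync down exactly once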
  1636  		once   sync.Once
  1637  		cancel = make(chan struct{})
  1638  		term   = func() {
  1639  			once.Do(func() {
  1640  				close(cancel)
  1641  			})
  1642  		}
  1643  	)
  1644  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
  1645  
  1646  	mkSource := func(name string) *testPeer {
  1647  		source := newTestPeer(name, t, term)
  1648  		source.accountTrie = sourceAccountTrie
  1649  		source.accountValues = elems
  1650  		return source
  1651  	}
  1652  	src := mkSource("source")
  1653  	syncer := setupSyncer(src)
  1654  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1655  		t.Fatalf("sync failed: %v", err)
  1656  	}
  1657  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1658  	// The trie root will always be requested, since it is added when the snap
  1659  	// sync cycle starts. When popping the queue, we do not look it up again.
  1660  	// Doing so would bring this number down to zero in this artificial testcase,
  1661  	// but only add extra IO for no reason in practice.
  1662  	if have, want := src.nTrienodeRequests, 1; have != want {
  1663  		fmt.Print(src.Stats())
  1664  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1665  	}
  1666  }
  1667  
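        // TestSlotEstimation exercises estimateRemainingSlots, which extrapolates the
        // number of slots still to come from the number seen so far and the fraction
        // of the hash space they cover, roughly count * 2^256 / last - count (rounded
        // down by integer math).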
  1668  func TestSlotEstimation(t *testing.T) {
  1669  	for i, tc := range []struct {
  1670  		last  common.Hash
  1671  		count int
  1672  		want  uint64
  1673  	}{
  1674  		{
			// Half the space: 100 slots extrapolate to ~200 total, 100 remaining
  1676  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1677  			100,
  1678  			100,
  1679  		},
  1680  		{
			// 1/16th of the space: 100 slots extrapolate to ~1600 total, 1500 remaining
  1682  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1683  			100,
  1684  			1500,
  1685  		},
  1686  		{
			// A bit more than 1/16th: integer rounding drops the estimate to 1499
  1688  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1689  			100,
  1690  			1499,
  1691  		},
  1692  		{
			// Almost everything covered: only 6 more slots estimated to remain
  1694  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1695  			100,
  1696  			6,
  1697  		},
  1698  		{
  1699  			// Almost nothing -- should lead to error
  1700  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1701  			1,
  1702  			0,
  1703  		},
  1704  		{
  1705  			// Nothing -- should lead to error
  1706  			common.Hash{},
  1707  			100,
  1708  			0,
  1709  		},
  1710  	} {
  1711  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1712  		if want := tc.want; have != want {
  1713  			t.Errorf("test %d: have %d want %d", i, have, want)
  1714  		}
  1715  	}
  1716  }