github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/eth/protocols/snap/sync_test.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sort"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/tirogen/go-ethereum/common"
    31  	"github.com/tirogen/go-ethereum/core/rawdb"
    32  	"github.com/tirogen/go-ethereum/core/types"
    33  	"github.com/tirogen/go-ethereum/crypto"
    34  	"github.com/tirogen/go-ethereum/ethdb"
    35  	"github.com/tirogen/go-ethereum/light"
    36  	"github.com/tirogen/go-ethereum/log"
    37  	"github.com/tirogen/go-ethereum/rlp"
    38  	"github.com/tirogen/go-ethereum/trie"
    39  	"golang.org/x/crypto/sha3"
    40  )
    41  
    42  func TestHashing(t *testing.T) {
    43  	t.Parallel()
    44  
    45  	var bytecodes = make([][]byte, 10)
    46  	for i := 0; i < len(bytecodes); i++ {
    47  		buf := make([]byte, 100)
    48  		rand.Read(buf)
    49  		bytecodes[i] = buf
    50  	}
    51  	var want, got string
    52  	var old = func() {
    53  		hasher := sha3.NewLegacyKeccak256()
    54  		for i := 0; i < len(bytecodes); i++ {
    55  			hasher.Reset()
    56  			hasher.Write(bytecodes[i])
    57  			hash := hasher.Sum(nil)
    58  			got = fmt.Sprintf("%v\n%v", got, hash)
    59  		}
    60  	}
    61  	var new = func() {
    62  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    63  		var hash = make([]byte, 32)
    64  		for i := 0; i < len(bytecodes); i++ {
    65  			hasher.Reset()
    66  			hasher.Write(bytecodes[i])
    67  			hasher.Read(hash)
    68  			want = fmt.Sprintf("%v\n%v", want, hash)
    69  		}
    70  	}
    71  	old()
    72  	new()
    73  	if want != got {
    74  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    75  	}
    76  }
    77  
    78  func BenchmarkHashing(b *testing.B) {
    79  	var bytecodes = make([][]byte, 10000)
    80  	for i := 0; i < len(bytecodes); i++ {
    81  		buf := make([]byte, 100)
    82  		rand.Read(buf)
    83  		bytecodes[i] = buf
    84  	}
    85  	var old = func() {
    86  		hasher := sha3.NewLegacyKeccak256()
    87  		for i := 0; i < len(bytecodes); i++ {
    88  			hasher.Reset()
    89  			hasher.Write(bytecodes[i])
    90  			hasher.Sum(nil)
    91  		}
    92  	}
    93  	var new = func() {
    94  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    95  		var hash = make([]byte, 32)
    96  		for i := 0; i < len(bytecodes); i++ {
    97  			hasher.Reset()
    98  			hasher.Write(bytecodes[i])
    99  			hasher.Read(hash)
   100  		}
   101  	}
   102  	b.Run("old", func(b *testing.B) {
   103  		b.ReportAllocs()
   104  		for i := 0; i < b.N; i++ {
   105  			old()
   106  		}
   107  	})
   108  	b.Run("new", func(b *testing.B) {
   109  		b.ReportAllocs()
   110  		for i := 0; i < b.N; i++ {
   111  			new()
   112  		}
   113  	})
   114  }
   115  
   116  type (
   117  	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
   118  	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   119  	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   120  	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   121  )
   122  
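        // testPeer is an in-memory mock of a snap-protocol peer: it serves account,
        // storage, trie-node and bytecode requests out of locally held tries via
        // configurable handler functions.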
   123  type testPeer struct {
   124  	id            string
   125  	test          *testing.T
   126  	remote        *Syncer
   127  	logger        log.Logger
   128  	accountTrie   *trie.Trie
   129  	accountValues entrySlice
   130  	storageTries  map[common.Hash]*trie.Trie
   131  	storageValues map[common.Hash]entrySlice
   132  
   133  	accountRequestHandler accountHandlerFunc
   134  	storageRequestHandler storageHandlerFunc
   135  	trieRequestHandler    trieHandlerFunc
   136  	codeRequestHandler    codeHandlerFunc
   137  	term                  func()
   138  
   139  	// counters
   140  	nAccountRequests  int
   141  	nStorageRequests  int
   142  	nBytecodeRequests int
   143  	nTrienodeRequests int
   144  }
   145  
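        // newTestPeer creates a testPeer with the default, well-behaving request
        // handlers installed.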
   146  func newTestPeer(id string, t *testing.T, term func()) *testPeer {
   147  	peer := &testPeer{
   148  		id:                    id,
   149  		test:                  t,
   150  		logger:                log.New("id", id),
   151  		accountRequestHandler: defaultAccountRequestHandler,
   152  		trieRequestHandler:    defaultTrieRequestHandler,
   153  		storageRequestHandler: defaultStorageRequestHandler,
   154  		codeRequestHandler:    defaultCodeRequestHandler,
   155  		term:                  term,
   156  	}
   157  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   158  	//peer.logger.SetHandler(stderrHandler)
   159  	return peer
   160  }
   161  
   162  func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
   163  	t.storageTries = make(map[common.Hash]*trie.Trie)
   164  	for root, trie := range tries {
   165  		t.storageTries[root] = trie.Copy()
   166  	}
   167  }
   168  
   169  func (t *testPeer) ID() string      { return t.id }
   170  func (t *testPeer) Log() log.Logger { return t.logger }
   171  
   172  func (t *testPeer) Stats() string {
   173  	return fmt.Sprintf(`Account requests: %d
   174  Storage requests: %d
   175  Bytecode requests: %d
   176  Trienode requests: %d
   177  `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
   178  }
   179  
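        // RequestAccountRange bumps the request counter and dispatches the request
        // asynchronously to the peer's configured account handler.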
   180  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   181  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   182  	t.nAccountRequests++
   183  	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
   184  	return nil
   185  }
   186  
   187  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   188  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   189  	t.nTrienodeRequests++
   190  	go t.trieRequestHandler(t, id, root, paths, bytes)
   191  	return nil
   192  }
   193  
   194  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   195  	t.nStorageRequests++
   196  	if len(accounts) == 1 && origin != nil {
   197  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   198  	} else {
   199  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   200  	}
   201  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   202  	return nil
   203  }
   204  
   205  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   206  	t.nBytecodeRequests++
   207  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   208  	go t.codeRequestHandler(t, id, hashes, bytes)
   209  	return nil
   210  }
   211  
   212  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   213  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   214  	// Pass the response
   215  	var nodes [][]byte
   216  	for _, pathset := range paths {
   217  		switch len(pathset) {
   218  		case 1:
   219  			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
   220  			if err != nil {
   221  				t.logger.Info("Error handling req", "error", err)
   222  				break
   223  			}
   224  			nodes = append(nodes, blob)
   225  		default:
   226  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   227  			for _, path := range pathset[1:] {
   228  				blob, _, err := account.TryGetNode(path)
   229  				if err != nil {
   230  					t.logger.Info("Error handling req", "error", err)
   231  					break
   232  				}
   233  				nodes = append(nodes, blob)
   234  			}
   235  		}
   236  	}
   237  	t.remote.OnTrieNodes(t, requestId, nodes)
   238  	return nil
   239  }
   240  
   241  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   242  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   243  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   244  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   245  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   246  		t.term()
   247  		return err
   248  	}
   249  	return nil
   250  }
   251  
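        // createAccountRequestResponse assembles the keys, values and boundary proofs
        // for an account range request, stopping once roughly 'cap' bytes have been
        // collected.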
   252  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   253  	var size uint64
   254  	if limit == (common.Hash{}) {
   255  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   256  	}
   257  	for _, entry := range t.accountValues {
   258  		if size > cap {
   259  			break
   260  		}
   261  		if bytes.Compare(origin[:], entry.k) <= 0 {
   262  			keys = append(keys, common.BytesToHash(entry.k))
   263  			vals = append(vals, entry.v)
   264  			size += uint64(32 + len(entry.v))
   265  		}
   266  		// If we've exceeded the request threshold, abort
   267  		if bytes.Compare(entry.k, limit[:]) >= 0 {
   268  			break
   269  		}
   270  	}
   271  	// Unless we send the entire trie, we need to supply proofs
   272  	// Actually, we need to supply proofs either way! This seems to be an implementation
   273  	// quirk in go-ethereum
   274  	proof := light.NewNodeSet()
   275  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   276  		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
   277  	}
   278  	if len(keys) > 0 {
   279  		lastK := (keys[len(keys)-1])[:]
   280  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   281  			t.logger.Error("Could not prove last item", "error", err)
   282  		}
   283  	}
   284  	for _, blob := range proof.NodeList() {
   285  		proofs = append(proofs, blob)
   286  	}
   287  	return keys, vals, proofs
   288  }
   289  
   290  // defaultStorageRequestHandler is a well-behaving storage request handler
   291  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   292  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   293  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   294  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   295  		t.term()
   296  	}
   297  	return nil
   298  }
   299  
   300  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   301  	var bytecodes [][]byte
   302  	for _, h := range hashes {
   303  		bytecodes = append(bytecodes, getCodeByHash(h))
   304  	}
   305  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   306  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   307  		t.term()
   308  	}
   309  	return nil
   310  }
   311  
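        // createStorageRequestResponse assembles per-account storage keys, values and
        // boundary proofs, capping the overall response at roughly 'max' bytes.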
   312  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   313  	var size uint64
   314  	for _, account := range accounts {
   315  		// The first account might start from a different origin and end sooner
   316  		var originHash common.Hash
   317  		if len(origin) > 0 {
   318  			originHash = common.BytesToHash(origin)
   319  		}
   320  		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   321  		if len(limit) > 0 {
   322  			limitHash = common.BytesToHash(limit)
   323  		}
   324  		var (
   325  			keys  []common.Hash
   326  			vals  [][]byte
   327  			abort bool
   328  		)
   329  		for _, entry := range t.storageValues[account] {
   330  			if size >= max {
   331  				abort = true
   332  				break
   333  			}
   334  			if bytes.Compare(entry.k, originHash[:]) < 0 {
   335  				continue
   336  			}
   337  			keys = append(keys, common.BytesToHash(entry.k))
   338  			vals = append(vals, entry.v)
   339  			size += uint64(32 + len(entry.v))
   340  			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
   341  				break
   342  			}
   343  		}
   344  		if len(keys) > 0 {
   345  			hashes = append(hashes, keys)
   346  			slots = append(slots, vals)
   347  		}
   348  		// Generate the Merkle proofs for the first and last storage slot, but
    349  		// only if the response was capped. If the entire storage trie is included
    350  		// in the response, there is no need for any proofs.
   351  		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
   352  			// If we're aborting, we need to prove the first and last item
   353  			// This terminates the response (and thus the loop)
   354  			proof := light.NewNodeSet()
   355  			stTrie := t.storageTries[account]
   356  
   357  			// Here's a potential gotcha: when constructing the proof, we cannot
   358  			// use the 'origin' slice directly, but must use the full 32-byte
   359  			// hash form.
   360  			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
   361  				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
   362  			}
   363  			if len(keys) > 0 {
   364  				lastK := (keys[len(keys)-1])[:]
   365  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   366  					t.logger.Error("Could not prove last item", "error", err)
   367  				}
   368  			}
   369  			for _, blob := range proof.NodeList() {
   370  				proofs = append(proofs, blob)
   371  			}
   372  			break
   373  		}
   374  	}
   375  	return hashes, slots, proofs
   376  }
   377  
    378  // createStorageRequestResponseAlwaysProve tests a corner case where the peer always
    379  // supplies the proof for the last account, even if it is 'complete'.
   380  func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   381  	var size uint64
   382  	max = max * 3 / 4
   383  
   384  	var origin common.Hash
   385  	if len(bOrigin) > 0 {
   386  		origin = common.BytesToHash(bOrigin)
   387  	}
   388  	var exit bool
   389  	for i, account := range accounts {
   390  		var keys []common.Hash
   391  		var vals [][]byte
   392  		for _, entry := range t.storageValues[account] {
   393  			if bytes.Compare(entry.k, origin[:]) < 0 {
   394  				exit = true
   395  			}
   396  			keys = append(keys, common.BytesToHash(entry.k))
   397  			vals = append(vals, entry.v)
   398  			size += uint64(32 + len(entry.v))
   399  			if size > max {
   400  				exit = true
   401  			}
   402  		}
   403  		if i == len(accounts)-1 {
   404  			exit = true
   405  		}
   406  		hashes = append(hashes, keys)
   407  		slots = append(slots, vals)
   408  
   409  		if exit {
   410  			// If we're aborting, we need to prove the first and last item
   411  			// This terminates the response (and thus the loop)
   412  			proof := light.NewNodeSet()
   413  			stTrie := t.storageTries[account]
   414  
   415  			// Here's a potential gotcha: when constructing the proof, we cannot
   416  			// use the 'origin' slice directly, but must use the full 32-byte
   417  			// hash form.
   418  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   419  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   420  					"error", err)
   421  			}
   422  			if len(keys) > 0 {
   423  				lastK := (keys[len(keys)-1])[:]
   424  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   425  					t.logger.Error("Could not prove last item", "error", err)
   426  				}
   427  			}
   428  			for _, blob := range proof.NodeList() {
   429  				proofs = append(proofs, blob)
   430  			}
   431  			break
   432  		}
   433  	}
   434  	return hashes, slots, proofs
   435  }
   436  
    437  // emptyRequestAccountRangeFn rejects AccountRangeRequests by answering them with an empty response
   438  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   439  	t.remote.OnAccounts(t, requestId, nil, nil, nil)
   440  	return nil
   441  }
   442  
   443  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   444  	return nil
   445  }
   446  
   447  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   448  	t.remote.OnTrieNodes(t, requestId, nil)
   449  	return nil
   450  }
   451  
   452  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   453  	return nil
   454  }
   455  
   456  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   457  	t.remote.OnStorage(t, requestId, nil, nil, nil)
   458  	return nil
   459  }
   460  
   461  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   462  	return nil
   463  }
   464  
   465  func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   466  	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
   467  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   468  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   469  		t.term()
   470  	}
   471  	return nil
   472  }
   473  
   474  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   475  //	var bytecodes [][]byte
   476  //	t.remote.OnByteCodes(t, id, bytecodes)
   477  //	return nil
   478  //}
   479  
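        // corruptCodeRequestHandler echoes the requested hashes back as if they were
        // bytecodes, so the delivery fails verification on the remote side.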
   480  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   481  	var bytecodes [][]byte
   482  	for _, h := range hashes {
   483  		// Send back the hashes
   484  		bytecodes = append(bytecodes, h[:])
   485  	}
   486  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   487  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   488  		// Mimic the real-life handler, which drops a peer on errors
   489  		t.remote.Unregister(t.id)
   490  	}
   491  	return nil
   492  }
   493  
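        // cappedCodeRequestHandler only delivers the first requested bytecode, leaving
        // the remainder to be re-requested.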
   494  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   495  	var bytecodes [][]byte
   496  	for _, h := range hashes[:1] {
   497  		bytecodes = append(bytecodes, getCodeByHash(h))
   498  	}
   499  	// Missing bytecode can be retrieved again, no error expected
   500  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   501  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   502  		t.term()
   503  	}
   504  	return nil
   505  }
   506  
    507  // starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to a very small size
   508  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   509  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   510  }
   511  
   512  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   513  	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
   514  }
   515  
   516  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   517  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   518  //}
   519  
   520  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   521  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
   522  	if len(proofs) > 0 {
   523  		proofs = proofs[1:]
   524  	}
   525  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   526  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   527  		// Mimic the real-life handler, which drops a peer on errors
   528  		t.remote.Unregister(t.id)
   529  	}
   530  	return nil
   531  }
   532  
   533  // corruptStorageRequestHandler doesn't provide good proofs
   534  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   535  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   536  	if len(proofs) > 0 {
   537  		proofs = proofs[1:]
   538  	}
   539  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   540  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   541  		// Mimic the real-life handler, which drops a peer on errors
   542  		t.remote.Unregister(t.id)
   543  	}
   544  	return nil
   545  }
   546  
   547  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   548  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   549  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   550  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   551  		// Mimic the real-life handler, which drops a peer on errors
   552  		t.remote.Unregister(t.id)
   553  	}
   554  	return nil
   555  }
   556  
   557  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   558  // also ship the entire trie inside the proof. If the attack is successful,
   559  // the remote side does not do any follow-up requests
   560  func TestSyncBloatedProof(t *testing.T) {
   561  	t.Parallel()
   562  
   563  	var (
   564  		once   sync.Once
   565  		cancel = make(chan struct{})
   566  		term   = func() {
   567  			once.Do(func() {
   568  				close(cancel)
   569  			})
   570  		}
   571  	)
   572  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   573  	source := newTestPeer("source", t, term)
   574  	source.accountTrie = sourceAccountTrie.Copy()
   575  	source.accountValues = elems
   576  
   577  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
   578  		var (
   579  			proofs [][]byte
   580  			keys   []common.Hash
   581  			vals   [][]byte
   582  		)
   583  		// The values
   584  		for _, entry := range t.accountValues {
   585  			if bytes.Compare(entry.k, origin[:]) < 0 {
   586  				continue
   587  			}
   588  			if bytes.Compare(entry.k, limit[:]) > 0 {
   589  				continue
   590  			}
   591  			keys = append(keys, common.BytesToHash(entry.k))
   592  			vals = append(vals, entry.v)
   593  		}
   594  		// The proofs
   595  		proof := light.NewNodeSet()
   596  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   597  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   598  		}
   599  		// The bloat: add proof of every single element
   600  		for _, entry := range t.accountValues {
   601  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   602  				t.logger.Error("Could not prove item", "error", err)
   603  			}
   604  		}
   605  		// And remove one item from the elements
   606  		if len(keys) > 2 {
   607  			keys = append(keys[:1], keys[2:]...)
   608  			vals = append(vals[:1], vals[2:]...)
   609  		}
   610  		for _, blob := range proof.NodeList() {
   611  			proofs = append(proofs, blob)
   612  		}
   613  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   614  			t.logger.Info("remote error on delivery (as expected)", "error", err)
   615  			t.term()
   616  			// This is actually correct, signal to exit the test successfully
   617  		}
   618  		return nil
   619  	}
   620  	syncer := setupSyncer(nodeScheme, source)
   621  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   622  		t.Fatal("No error returned from incomplete/cancelled sync")
   623  	}
   624  }
   625  
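        // setupSyncer creates a Syncer backed by a fresh in-memory database and
        // registers the given test peers with it.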
   626  func setupSyncer(scheme trie.NodeScheme, peers ...*testPeer) *Syncer {
   627  	stateDb := rawdb.NewMemoryDatabase()
   628  	syncer := NewSyncer(stateDb, scheme)
   629  	for _, peer := range peers {
   630  		syncer.Register(peer)
   631  		peer.remote = syncer
   632  	}
   633  	return syncer
   634  }
   635  
   636  // TestSync tests a basic sync with one peer
   637  func TestSync(t *testing.T) {
   638  	t.Parallel()
   639  
   640  	var (
   641  		once   sync.Once
   642  		cancel = make(chan struct{})
   643  		term   = func() {
   644  			once.Do(func() {
   645  				close(cancel)
   646  			})
   647  		}
   648  	)
   649  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   650  
   651  	mkSource := func(name string) *testPeer {
   652  		source := newTestPeer(name, t, term)
   653  		source.accountTrie = sourceAccountTrie.Copy()
   654  		source.accountValues = elems
   655  		return source
   656  	}
   657  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   658  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   659  		t.Fatalf("sync failed: %v", err)
   660  	}
   661  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   662  }
   663  
   664  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   665  // panic within the prover
   666  func TestSyncTinyTriePanic(t *testing.T) {
   667  	t.Parallel()
   668  
   669  	var (
   670  		once   sync.Once
   671  		cancel = make(chan struct{})
   672  		term   = func() {
   673  			once.Do(func() {
   674  				close(cancel)
   675  			})
   676  		}
   677  	)
   678  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   679  
   680  	mkSource := func(name string) *testPeer {
   681  		source := newTestPeer(name, t, term)
   682  		source.accountTrie = sourceAccountTrie.Copy()
   683  		source.accountValues = elems
   684  		return source
   685  	}
   686  	syncer := setupSyncer(nodeScheme, mkSource("source"))
   687  	done := checkStall(t, term)
   688  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   689  		t.Fatalf("sync failed: %v", err)
   690  	}
   691  	close(done)
   692  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   693  }
   694  
   695  // TestMultiSync tests a basic sync with multiple peers
   696  func TestMultiSync(t *testing.T) {
   697  	t.Parallel()
   698  
   699  	var (
   700  		once   sync.Once
   701  		cancel = make(chan struct{})
   702  		term   = func() {
   703  			once.Do(func() {
   704  				close(cancel)
   705  			})
   706  		}
   707  	)
   708  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   709  
   710  	mkSource := func(name string) *testPeer {
   711  		source := newTestPeer(name, t, term)
   712  		source.accountTrie = sourceAccountTrie.Copy()
   713  		source.accountValues = elems
   714  		return source
   715  	}
   716  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
   717  	done := checkStall(t, term)
   718  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   719  		t.Fatalf("sync failed: %v", err)
   720  	}
   721  	close(done)
   722  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   723  }
   724  
    725  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   726  func TestSyncWithStorage(t *testing.T) {
   727  	t.Parallel()
   728  
   729  	var (
   730  		once   sync.Once
   731  		cancel = make(chan struct{})
   732  		term   = func() {
   733  			once.Do(func() {
   734  				close(cancel)
   735  			})
   736  		}
   737  	)
   738  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
   739  
   740  	mkSource := func(name string) *testPeer {
   741  		source := newTestPeer(name, t, term)
   742  		source.accountTrie = sourceAccountTrie.Copy()
   743  		source.accountValues = elems
   744  		source.setStorageTries(storageTries)
   745  		source.storageValues = storageElems
   746  		return source
   747  	}
   748  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
   749  	done := checkStall(t, term)
   750  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   751  		t.Fatalf("sync failed: %v", err)
   752  	}
   753  	close(done)
   754  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   755  }
   756  
    757  // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
   758  func TestMultiSyncManyUseless(t *testing.T) {
   759  	t.Parallel()
   760  
   761  	var (
   762  		once   sync.Once
   763  		cancel = make(chan struct{})
   764  		term   = func() {
   765  			once.Do(func() {
   766  				close(cancel)
   767  			})
   768  		}
   769  	)
   770  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   771  
   772  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   773  		source := newTestPeer(name, t, term)
   774  		source.accountTrie = sourceAccountTrie.Copy()
   775  		source.accountValues = elems
   776  		source.setStorageTries(storageTries)
   777  		source.storageValues = storageElems
   778  
   779  		if !noAccount {
   780  			source.accountRequestHandler = emptyRequestAccountRangeFn
   781  		}
   782  		if !noStorage {
   783  			source.storageRequestHandler = emptyStorageRequestHandler
   784  		}
   785  		if !noTrieNode {
   786  			source.trieRequestHandler = emptyTrieRequestHandler
   787  		}
   788  		return source
   789  	}
   790  
   791  	syncer := setupSyncer(
   792  		nodeScheme,
   793  		mkSource("full", true, true, true),
   794  		mkSource("noAccounts", false, true, true),
   795  		mkSource("noStorage", true, false, true),
   796  		mkSource("noTrie", true, true, false),
   797  	)
   798  	done := checkStall(t, term)
   799  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   800  		t.Fatalf("sync failed: %v", err)
   801  	}
   802  	close(done)
   803  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   804  }
   805  
    806  // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all, with the request timeout set very low
   807  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   808  	var (
   809  		once   sync.Once
   810  		cancel = make(chan struct{})
   811  		term   = func() {
   812  			once.Do(func() {
   813  				close(cancel)
   814  			})
   815  		}
   816  	)
   817  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   818  
   819  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   820  		source := newTestPeer(name, t, term)
   821  		source.accountTrie = sourceAccountTrie.Copy()
   822  		source.accountValues = elems
   823  		source.setStorageTries(storageTries)
   824  		source.storageValues = storageElems
   825  
   826  		if !noAccount {
   827  			source.accountRequestHandler = emptyRequestAccountRangeFn
   828  		}
   829  		if !noStorage {
   830  			source.storageRequestHandler = emptyStorageRequestHandler
   831  		}
   832  		if !noTrieNode {
   833  			source.trieRequestHandler = emptyTrieRequestHandler
   834  		}
   835  		return source
   836  	}
   837  
   838  	syncer := setupSyncer(
   839  		nodeScheme,
   840  		mkSource("full", true, true, true),
   841  		mkSource("noAccounts", false, true, true),
   842  		mkSource("noStorage", true, false, true),
   843  		mkSource("noTrie", true, true, false),
   844  	)
   845  	// We're setting the timeout to very low, to increase the chance of the timeout
   846  	// being triggered. This was previously a cause of panic, when a response
   847  	// arrived simultaneously as a timeout was triggered.
   848  	syncer.rates.OverrideTTLLimit = time.Millisecond
   849  
   850  	done := checkStall(t, term)
   851  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   852  		t.Fatalf("sync failed: %v", err)
   853  	}
   854  	close(done)
   855  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   856  }
   857  
    858  // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
   859  func TestMultiSyncManyUnresponsive(t *testing.T) {
   860  	var (
   861  		once   sync.Once
   862  		cancel = make(chan struct{})
   863  		term   = func() {
   864  			once.Do(func() {
   865  				close(cancel)
   866  			})
   867  		}
   868  	)
   869  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
   870  
   871  	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
   872  		source := newTestPeer(name, t, term)
   873  		source.accountTrie = sourceAccountTrie.Copy()
   874  		source.accountValues = elems
   875  		source.setStorageTries(storageTries)
   876  		source.storageValues = storageElems
   877  
   878  		if !noAccount {
   879  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   880  		}
   881  		if !noStorage {
   882  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   883  		}
   884  		if !noTrieNode {
   885  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   886  		}
   887  		return source
   888  	}
   889  
   890  	syncer := setupSyncer(
   891  		nodeScheme,
   892  		mkSource("full", true, true, true),
   893  		mkSource("noAccounts", false, true, true),
   894  		mkSource("noStorage", true, false, true),
   895  		mkSource("noTrie", true, true, false),
   896  	)
   897  	// We're setting the timeout to very low, to make the test run a bit faster
   898  	syncer.rates.OverrideTTLLimit = time.Millisecond
   899  
   900  	done := checkStall(t, term)
   901  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   902  		t.Fatalf("sync failed: %v", err)
   903  	}
   904  	close(done)
   905  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   906  }
   907  
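        // checkStall aborts the sync via term() if it has not completed within a
        // minute. The returned channel should be closed by the caller once the test
        // is done.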
   908  func checkStall(t *testing.T, term func()) chan struct{} {
   909  	testDone := make(chan struct{})
   910  	go func() {
   911  		select {
   912  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   913  			t.Log("Sync stalled")
   914  			term()
   915  		case <-testDone:
   916  			return
   917  		}
   918  	}()
   919  	return testDone
   920  }
   921  
   922  // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
   923  // account trie has a few boundary elements.
   924  func TestSyncBoundaryAccountTrie(t *testing.T) {
   925  	t.Parallel()
   926  
   927  	var (
   928  		once   sync.Once
   929  		cancel = make(chan struct{})
   930  		term   = func() {
   931  			once.Do(func() {
   932  				close(cancel)
   933  			})
   934  		}
   935  	)
   936  	nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
   937  
   938  	mkSource := func(name string) *testPeer {
   939  		source := newTestPeer(name, t, term)
   940  		source.accountTrie = sourceAccountTrie.Copy()
   941  		source.accountValues = elems
   942  		return source
   943  	}
   944  	syncer := setupSyncer(
   945  		nodeScheme,
   946  		mkSource("peer-a"),
   947  		mkSource("peer-b"),
   948  	)
   949  	done := checkStall(t, term)
   950  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   951  		t.Fatalf("sync failed: %v", err)
   952  	}
   953  	close(done)
   954  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   955  }
   956  
   957  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   958  // consistently returning very small results
   959  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   960  	t.Parallel()
   961  
   962  	var (
   963  		once   sync.Once
   964  		cancel = make(chan struct{})
   965  		term   = func() {
   966  			once.Do(func() {
   967  				close(cancel)
   968  			})
   969  		}
   970  	)
   971  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   972  
   973  	mkSource := func(name string, slow bool) *testPeer {
   974  		source := newTestPeer(name, t, term)
   975  		source.accountTrie = sourceAccountTrie.Copy()
   976  		source.accountValues = elems
   977  
   978  		if slow {
   979  			source.accountRequestHandler = starvingAccountRequestHandler
   980  		}
   981  		return source
   982  	}
   983  
   984  	syncer := setupSyncer(
   985  		nodeScheme,
   986  		mkSource("nice-a", false),
   987  		mkSource("nice-b", false),
   988  		mkSource("nice-c", false),
   989  		mkSource("capped", true),
   990  	)
   991  	done := checkStall(t, term)
   992  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   993  		t.Fatalf("sync failed: %v", err)
   994  	}
   995  	close(done)
   996  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
   997  }
   998  
   999  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
  1000  // code requests properly.
  1001  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
  1002  	t.Parallel()
  1003  
  1004  	var (
  1005  		once   sync.Once
  1006  		cancel = make(chan struct{})
  1007  		term   = func() {
  1008  			once.Do(func() {
  1009  				close(cancel)
  1010  			})
  1011  		}
  1012  	)
  1013  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1014  
  1015  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1016  		source := newTestPeer(name, t, term)
  1017  		source.accountTrie = sourceAccountTrie.Copy()
  1018  		source.accountValues = elems
  1019  		source.codeRequestHandler = codeFn
  1020  		return source
  1021  	}
  1022  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1023  	// chance that the full set of codes requested is sent only to the
  1024  	// non-corrupt peer, which delivers everything in one go, and makes the
  1025  	// test moot
  1026  	syncer := setupSyncer(
  1027  		nodeScheme,
  1028  		mkSource("capped", cappedCodeRequestHandler),
  1029  		mkSource("corrupt", corruptCodeRequestHandler),
  1030  	)
  1031  	done := checkStall(t, term)
  1032  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1033  		t.Fatalf("sync failed: %v", err)
  1034  	}
  1035  	close(done)
  1036  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1037  }
  1038  
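        // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which delivers
        // account ranges with clipped (invalid) proofs.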
  1039  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
  1040  	t.Parallel()
  1041  
  1042  	var (
  1043  		once   sync.Once
  1044  		cancel = make(chan struct{})
  1045  		term   = func() {
  1046  			once.Do(func() {
  1047  				close(cancel)
  1048  			})
  1049  		}
  1050  	)
  1051  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1052  
  1053  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
  1054  		source := newTestPeer(name, t, term)
  1055  		source.accountTrie = sourceAccountTrie.Copy()
  1056  		source.accountValues = elems
  1057  		source.accountRequestHandler = accFn
  1058  		return source
  1059  	}
  1060  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
   1061  	// chance that the full set of codes requested is sent only to the
  1062  	// non-corrupt peer, which delivers everything in one go, and makes the
  1063  	// test moot
  1064  	syncer := setupSyncer(
  1065  		nodeScheme,
  1066  		mkSource("capped", defaultAccountRequestHandler),
  1067  		mkSource("corrupt", corruptAccountRequestHandler),
  1068  	)
  1069  	done := checkStall(t, term)
  1070  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1071  		t.Fatalf("sync failed: %v", err)
  1072  	}
  1073  	close(done)
  1074  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1075  }
  1076  
   1077  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
   1078  // one by one
  1079  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
  1080  	t.Parallel()
  1081  
  1082  	var (
  1083  		once   sync.Once
  1084  		cancel = make(chan struct{})
  1085  		term   = func() {
  1086  			once.Do(func() {
  1087  				close(cancel)
  1088  			})
  1089  		}
  1090  	)
  1091  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
  1092  
  1093  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
  1094  		source := newTestPeer(name, t, term)
  1095  		source.accountTrie = sourceAccountTrie.Copy()
  1096  		source.accountValues = elems
  1097  		source.codeRequestHandler = codeFn
  1098  		return source
  1099  	}
  1100  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
  1101  	// so it shouldn't be more than that
  1102  	var counter int
  1103  	syncer := setupSyncer(
  1104  		nodeScheme,
  1105  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
  1106  			counter++
  1107  			return cappedCodeRequestHandler(t, id, hashes, max)
  1108  		}),
  1109  	)
  1110  	done := checkStall(t, term)
  1111  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1112  		t.Fatalf("sync failed: %v", err)
  1113  	}
  1114  	close(done)
  1115  
  1116  	// There are only 8 unique hashes, and 3K accounts. However, the code
  1117  	// deduplication is per request batch. If it were a perfect global dedup,
  1118  	// we would expect only 8 requests. If there were no dedup, there would be
  1119  	// 3k requests.
  1120  	// We expect somewhere below 100 requests for these 8 unique hashes. But
  1121  	// the number can be flaky, so don't limit it so strictly.
  1122  	if threshold := 100; counter > threshold {
  1123  		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
  1124  	}
  1125  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1126  }
  1127  
  1128  // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
  1129  // storage trie has a few boundary elements.
  1130  func TestSyncBoundaryStorageTrie(t *testing.T) {
  1131  	t.Parallel()
  1132  
  1133  	var (
  1134  		once   sync.Once
  1135  		cancel = make(chan struct{})
  1136  		term   = func() {
  1137  			once.Do(func() {
  1138  				close(cancel)
  1139  			})
  1140  		}
  1141  	)
  1142  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
  1143  
  1144  	mkSource := func(name string) *testPeer {
  1145  		source := newTestPeer(name, t, term)
  1146  		source.accountTrie = sourceAccountTrie.Copy()
  1147  		source.accountValues = elems
  1148  		source.setStorageTries(storageTries)
  1149  		source.storageValues = storageElems
  1150  		return source
  1151  	}
  1152  	syncer := setupSyncer(
  1153  		nodeScheme,
  1154  		mkSource("peer-a"),
  1155  		mkSource("peer-b"),
  1156  	)
  1157  	done := checkStall(t, term)
  1158  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1159  		t.Fatalf("sync failed: %v", err)
  1160  	}
  1161  	close(done)
  1162  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1163  }
  1164  
  1165  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
  1166  // consistently returning very small results
  1167  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
  1168  	t.Parallel()
  1169  
  1170  	var (
  1171  		once   sync.Once
  1172  		cancel = make(chan struct{})
  1173  		term   = func() {
  1174  			once.Do(func() {
  1175  				close(cancel)
  1176  			})
  1177  		}
  1178  	)
  1179  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
  1180  
  1181  	mkSource := func(name string, slow bool) *testPeer {
  1182  		source := newTestPeer(name, t, term)
  1183  		source.accountTrie = sourceAccountTrie.Copy()
  1184  		source.accountValues = elems
  1185  		source.setStorageTries(storageTries)
  1186  		source.storageValues = storageElems
  1187  
  1188  		if slow {
  1189  			source.storageRequestHandler = starvingStorageRequestHandler
  1190  		}
  1191  		return source
  1192  	}
  1193  
  1194  	syncer := setupSyncer(
  1195  		nodeScheme,
  1196  		mkSource("nice-a", false),
  1197  		mkSource("slow", true),
  1198  	)
  1199  	done := checkStall(t, term)
  1200  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1201  		t.Fatalf("sync failed: %v", err)
  1202  	}
  1203  	close(done)
  1204  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1205  }
  1206  
  1207  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
  1208  // sometimes sending bad proofs
  1209  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
  1210  	t.Parallel()
  1211  
  1212  	var (
  1213  		once   sync.Once
  1214  		cancel = make(chan struct{})
  1215  		term   = func() {
  1216  			once.Do(func() {
  1217  				close(cancel)
  1218  			})
  1219  		}
  1220  	)
  1221  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1222  
  1223  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1224  		source := newTestPeer(name, t, term)
  1225  		source.accountTrie = sourceAccountTrie.Copy()
  1226  		source.accountValues = elems
  1227  		source.setStorageTries(storageTries)
  1228  		source.storageValues = storageElems
  1229  		source.storageRequestHandler = handler
  1230  		return source
  1231  	}
  1232  
  1233  	syncer := setupSyncer(
  1234  		nodeScheme,
  1235  		mkSource("nice-a", defaultStorageRequestHandler),
  1236  		mkSource("nice-b", defaultStorageRequestHandler),
  1237  		mkSource("nice-c", defaultStorageRequestHandler),
  1238  		mkSource("corrupt", corruptStorageRequestHandler),
  1239  	)
  1240  	done := checkStall(t, term)
  1241  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1242  		t.Fatalf("sync failed: %v", err)
  1243  	}
  1244  	close(done)
  1245  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1246  }
  1247  
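        // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
        // where one peer delivers storage ranges without any proofs.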
  1248  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
  1249  	t.Parallel()
  1250  
  1251  	var (
  1252  		once   sync.Once
  1253  		cancel = make(chan struct{})
  1254  		term   = func() {
  1255  			once.Do(func() {
  1256  				close(cancel)
  1257  			})
  1258  		}
  1259  	)
  1260  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
  1261  
  1262  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
  1263  		source := newTestPeer(name, t, term)
  1264  		source.accountTrie = sourceAccountTrie.Copy()
  1265  		source.accountValues = elems
  1266  		source.setStorageTries(storageTries)
  1267  		source.storageValues = storageElems
  1268  		source.storageRequestHandler = handler
  1269  		return source
  1270  	}
  1271  	syncer := setupSyncer(
  1272  		nodeScheme,
  1273  		mkSource("nice-a", defaultStorageRequestHandler),
  1274  		mkSource("nice-b", defaultStorageRequestHandler),
  1275  		mkSource("nice-c", defaultStorageRequestHandler),
  1276  		mkSource("corrupt", noProofStorageRequestHandler),
  1277  	)
  1278  	done := checkStall(t, term)
  1279  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1280  		t.Fatalf("sync failed: %v", err)
  1281  	}
  1282  	close(done)
  1283  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1284  }
  1285  
   1286  // TestSyncWithStorageMisbehavingProve tests a basic sync using accounts + storage + code, against
  1287  // a peer who insists on delivering full storage sets _and_ proofs. This triggered
  1288  // an error, where the recipient erroneously clipped the boundary nodes, but
  1289  // did not mark the account for healing.
  1290  func TestSyncWithStorageMisbehavingProve(t *testing.T) {
  1291  	t.Parallel()
  1292  	var (
  1293  		once   sync.Once
  1294  		cancel = make(chan struct{})
  1295  		term   = func() {
  1296  			once.Do(func() {
  1297  				close(cancel)
  1298  			})
  1299  		}
  1300  	)
  1301  	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
  1302  
  1303  	mkSource := func(name string) *testPeer {
  1304  		source := newTestPeer(name, t, term)
  1305  		source.accountTrie = sourceAccountTrie.Copy()
  1306  		source.accountValues = elems
  1307  		source.setStorageTries(storageTries)
  1308  		source.storageValues = storageElems
  1309  		source.storageRequestHandler = proofHappyStorageRequestHandler
  1310  		return source
  1311  	}
  1312  	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
  1313  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1314  		t.Fatalf("sync failed: %v", err)
  1315  	}
  1316  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1317  }
  1318  
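        // kv is a raw key/value pair as stored in the test tries.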
  1319  type kv struct {
  1320  	k, v []byte
  1321  }
  1322  
  1323  // Some helpers for sorting
  1324  type entrySlice []*kv
  1325  
  1326  func (p entrySlice) Len() int           { return len(p) }
  1327  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
  1328  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
  1329  
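        // key32 converts a counter into a 32-byte, little-endian padded trie key.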
  1330  func key32(i uint64) []byte {
  1331  	key := make([]byte, 32)
  1332  	binary.LittleEndian.PutUint64(key, i)
  1333  	return key
  1334  }
  1335  
  1336  var (
  1337  	codehashes = []common.Hash{
  1338  		crypto.Keccak256Hash([]byte{0}),
  1339  		crypto.Keccak256Hash([]byte{1}),
  1340  		crypto.Keccak256Hash([]byte{2}),
  1341  		crypto.Keccak256Hash([]byte{3}),
  1342  		crypto.Keccak256Hash([]byte{4}),
  1343  		crypto.Keccak256Hash([]byte{5}),
  1344  		crypto.Keccak256Hash([]byte{6}),
  1345  		crypto.Keccak256Hash([]byte{7}),
  1346  	}
  1347  )
  1348  
  1349  // getCodeHash returns a pseudo-random code hash
  1350  func getCodeHash(i uint64) []byte {
  1351  	h := codehashes[int(i)%len(codehashes)]
  1352  	return common.CopyBytes(h[:])
  1353  }
  1354  
   1355  // getCodeByHash is a convenience function to look up the code from the code hash
  1356  func getCodeByHash(hash common.Hash) []byte {
  1357  	if hash == emptyCode {
  1358  		return nil
  1359  	}
  1360  	for i, h := range codehashes {
  1361  		if h == hash {
  1362  			return []byte{byte(i)}
  1363  		}
  1364  	}
  1365  	return nil
  1366  }
  1367  
   1368  // makeAccountTrieNoStorage spits out a trie, along with the leaves
  1369  func makeAccountTrieNoStorage(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
  1370  	var (
  1371  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1372  		accTrie = trie.NewEmpty(db)
  1373  		entries entrySlice
  1374  	)
  1375  	for i := uint64(1); i <= uint64(n); i++ {
  1376  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1377  			Nonce:    i,
  1378  			Balance:  big.NewInt(int64(i)),
  1379  			Root:     emptyRoot,
  1380  			CodeHash: getCodeHash(i),
  1381  		})
  1382  		key := key32(i)
  1383  		elem := &kv{key, value}
  1384  		accTrie.Update(elem.k, elem.v)
  1385  		entries = append(entries, elem)
  1386  	}
  1387  	sort.Sort(entries)
  1388  
  1389  	// Commit the state changes into db and re-create the trie
  1390  	// for accessing later.
  1391  	root, nodes, _ := accTrie.Commit(false)
  1392  	db.Update(trie.NewWithNodeSet(nodes))
  1393  
  1394  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1395  	return db.Scheme(), accTrie, entries
  1396  }
  1397  
  1398  // makeBoundaryAccountTrie constructs an account trie. Instead of filling
  1399  // accounts normally, this function will fill a few accounts which have
   1400  // boundary hashes.
  1401  func makeBoundaryAccountTrie(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
  1402  	var (
  1403  		entries    entrySlice
  1404  		boundaries []common.Hash
  1405  
  1406  		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1407  		accTrie = trie.NewEmpty(db)
  1408  	)
  1409  	// Initialize boundaries
  1410  	var next common.Hash
  1411  	step := new(big.Int).Sub(
  1412  		new(big.Int).Div(
  1413  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1414  			big.NewInt(int64(accountConcurrency)),
  1415  		), common.Big1,
  1416  	)
  1417  	for i := 0; i < accountConcurrency; i++ {
  1418  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1419  		if i == accountConcurrency-1 {
  1420  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1421  		}
  1422  		boundaries = append(boundaries, last)
  1423  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1424  	}
  1425  	// Fill boundary accounts
  1426  	for i := 0; i < len(boundaries); i++ {
  1427  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1428  			Nonce:    uint64(0),
  1429  			Balance:  big.NewInt(int64(i)),
  1430  			Root:     emptyRoot,
  1431  			CodeHash: getCodeHash(uint64(i)),
  1432  		})
  1433  		elem := &kv{boundaries[i].Bytes(), value}
  1434  		accTrie.Update(elem.k, elem.v)
  1435  		entries = append(entries, elem)
  1436  	}
  1437  	// Fill other accounts if required
  1438  	for i := uint64(1); i <= uint64(n); i++ {
  1439  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1440  			Nonce:    i,
  1441  			Balance:  big.NewInt(int64(i)),
  1442  			Root:     emptyRoot,
  1443  			CodeHash: getCodeHash(i),
  1444  		})
  1445  		elem := &kv{key32(i), value}
  1446  		accTrie.Update(elem.k, elem.v)
  1447  		entries = append(entries, elem)
  1448  	}
  1449  	sort.Sort(entries)
  1450  
  1451  	// Commit the state changes into db and re-create the trie
  1452  	// for accessing later.
  1453  	root, nodes, _ := accTrie.Commit(false)
  1454  	db.Update(trie.NewWithNodeSet(nodes))
  1455  
  1456  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1457  	return db.Scheme(), accTrie, entries
  1458  }
  1459  
   1460  // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
  1461  // has a unique storage set.
  1462  func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (trie.NodeScheme, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1463  	var (
  1464  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1465  		accTrie        = trie.NewEmpty(db)
  1466  		entries        entrySlice
  1467  		storageRoots   = make(map[common.Hash]common.Hash)
  1468  		storageTries   = make(map[common.Hash]*trie.Trie)
  1469  		storageEntries = make(map[common.Hash]entrySlice)
  1470  		nodes          = trie.NewMergedNodeSet()
  1471  	)
  1472  	// Create n accounts in the trie
  1473  	for i := uint64(1); i <= uint64(accounts); i++ {
  1474  		key := key32(i)
  1475  		codehash := emptyCode[:]
  1476  		if code {
  1477  			codehash = getCodeHash(i)
  1478  		}
  1479  		// Create a storage trie
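        		// seeded with the account index, so every account ends up with a
        		// distinct storage root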
  1480  		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
  1481  		nodes.Merge(stNodes)
  1482  
  1483  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1484  			Nonce:    i,
  1485  			Balance:  big.NewInt(int64(i)),
  1486  			Root:     stRoot,
  1487  			CodeHash: codehash,
  1488  		})
  1489  		elem := &kv{key, value}
  1490  		accTrie.Update(elem.k, elem.v)
  1491  		entries = append(entries, elem)
  1492  
  1493  		storageRoots[common.BytesToHash(key)] = stRoot
  1494  		storageEntries[common.BytesToHash(key)] = stEntries
  1495  	}
  1496  	sort.Sort(entries)
  1497  
  1498  	// Commit account trie
  1499  	root, set, _ := accTrie.Commit(true)
  1500  	nodes.Merge(set)
  1501  
  1502  	// Commit gathered dirty nodes into database
  1503  	db.Update(nodes)
  1504  
  1505  	// Re-create tries with new root
  1506  	accTrie, _ = trie.New(trie.StateTrieID(root), db)
  1507  	for i := uint64(1); i <= uint64(accounts); i++ {
  1508  		key := key32(i)
  1509  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1510  		trie, _ := trie.New(id, db)
  1511  		storageTries[common.BytesToHash(key)] = trie
  1512  	}
  1513  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1514  }
  1515  
  1516  // makeAccountTrieWithStorage constructs an account trie with a storage trie
        // attached to every account, returning the tries along with their sorted
        // leaf entries.
  1517  func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (trie.NodeScheme, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1518  	var (
  1519  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1520  		accTrie        = trie.NewEmpty(db)
  1521  		entries        entrySlice
  1522  		storageRoots   = make(map[common.Hash]common.Hash)
  1523  		storageTries   = make(map[common.Hash]*trie.Trie)
  1524  		storageEntries = make(map[common.Hash]entrySlice)
  1525  		nodes          = trie.NewMergedNodeSet()
  1526  	)
  1527  	// Create n accounts in the trie
  1528  	for i := uint64(1); i <= uint64(accounts); i++ {
  1529  		key := key32(i)
  1530  		codehash := emptyCode[:]
  1531  		if code {
  1532  			codehash = getCodeHash(i)
  1533  		}
  1534  		// Make a storage trie
  1535  		var (
  1536  			stRoot    common.Hash
  1537  			stNodes   *trie.NodeSet
  1538  			stEntries entrySlice
  1539  		)
  1540  		if boundary {
  1541  			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
  1542  		} else {
  1543  			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
  1544  		}
  1545  		nodes.Merge(stNodes)
  1546  
  1547  		value, _ := rlp.EncodeToBytes(&types.StateAccount{
  1548  			Nonce:    i,
  1549  			Balance:  big.NewInt(int64(i)),
  1550  			Root:     stRoot,
  1551  			CodeHash: codehash,
  1552  		})
  1553  		elem := &kv{key, value}
  1554  		accTrie.Update(elem.k, elem.v)
  1555  		entries = append(entries, elem)
  1556  
  1557  		// We reuse the same storage layout for every account, so all accounts
        		// share an identical storage root.
  1558  		storageRoots[common.BytesToHash(key)] = stRoot
  1559  		storageEntries[common.BytesToHash(key)] = stEntries
  1560  	}
  1561  	sort.Sort(entries)
  1562  
  1563  	// Commit account trie
  1564  	root, set, _ := accTrie.Commit(true)
  1565  	nodes.Merge(set)
  1566  
  1567  	// Commit gathered dirty nodes into database
  1568  	db.Update(nodes)
  1569  
  1570  	// Re-create tries with new root
  1571  	accTrie, err := trie.New(trie.StateTrieID(root), db)
  1572  	if err != nil {
  1573  		panic(err)
  1574  	}
  1575  	for i := uint64(1); i <= uint64(accounts); i++ {
  1576  		key := key32(i)
  1577  		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
  1578  		trie, err := trie.New(id, db)
  1579  		if err != nil {
  1580  			panic(err)
  1581  		}
  1582  		storageTries[common.BytesToHash(key)] = trie
  1583  	}
  1584  	return db.Scheme(), accTrie, entries, storageTries, storageEntries
  1585  }
  1586  
  1587  // makeStorageTrieWithSeed fills a storage trie with n items, returning the
  1588  // storage root, the dirty trie nodes and the sorted entries. The seed can be
  1589  // used to ensure that tries are unique.
  1590  func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
  1591  	trie, _ := trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
  1592  	var entries entrySlice
  1593  	for i := uint64(1); i <= n; i++ {
  1594  		// store 'x + seed' at slot 'x'
  1595  		slotValue := key32(i + seed)
  1596  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1597  
  1598  		slotKey := key32(i)
  1599  		key := crypto.Keccak256Hash(slotKey[:])
  1600  
  1601  		elem := &kv{key[:], rlpSlotValue}
  1602  		trie.Update(elem.k, elem.v)
  1603  		entries = append(entries, elem)
  1604  	}
  1605  	sort.Sort(entries)
  1606  	root, nodes, _ := trie.Commit(false)
  1607  	return root, nodes, entries
  1608  }
  1609  
  1610  // makeBoundaryStorageTrie constructs a storage trie. In addition to the n
  1611  // regular slots, it also fills in a few slots whose keys sit exactly on the
  1612  // boundary hashes.
  1613  func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
  1614  	var (
  1615  		entries    entrySlice
  1616  		boundaries []common.Hash
  1617  		trie, _    = trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
  1618  	)
  1619  	// Initialize boundaries
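        	// Split the hash space into accountConcurrency equal-sized chunks and use
        	// the last hash of each chunk as a boundary slot key.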
  1620  	var next common.Hash
  1621  	step := new(big.Int).Sub(
  1622  		new(big.Int).Div(
  1623  			new(big.Int).Exp(common.Big2, common.Big256, nil),
  1624  			big.NewInt(int64(accountConcurrency)),
  1625  		), common.Big1,
  1626  	)
  1627  	for i := 0; i < accountConcurrency; i++ {
  1628  		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
  1629  		if i == accountConcurrency-1 {
  1630  			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  1631  		}
  1632  		boundaries = append(boundaries, last)
  1633  		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
  1634  	}
  1635  	// Fill boundary slots
  1636  	for i := 0; i < len(boundaries); i++ {
  1637  		key := boundaries[i]
  1638  		val := []byte{0xde, 0xad, 0xbe, 0xef}
  1639  
  1640  		elem := &kv{key[:], val}
  1641  		trie.Update(elem.k, elem.v)
  1642  		entries = append(entries, elem)
  1643  	}
  1644  	// Fill other slots if required
  1645  	for i := uint64(1); i <= uint64(n); i++ {
  1646  		slotKey := key32(i)
  1647  		key := crypto.Keccak256Hash(slotKey[:])
  1648  
  1649  		slotValue := key32(i)
  1650  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1651  
  1652  		elem := &kv{key[:], rlpSlotValue}
  1653  		trie.Update(elem.k, elem.v)
  1654  		entries = append(entries, elem)
  1655  	}
  1656  	sort.Sort(entries)
  1657  	root, nodes, _ := trie.Commit(false)
  1658  	return root, nodes, entries
  1659  }
  1660  
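        // verifyTrie opens the account trie at the given root from the provided
        // database, iterates every account and all of its storage slots, and fails
        // the test if any trie cannot be opened or fully traversed.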
  1661  func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
  1662  	t.Helper()
  1663  	triedb := trie.NewDatabase(rawdb.NewDatabase(db))
  1664  	accTrie, err := trie.New(trie.StateTrieID(root), triedb)
  1665  	if err != nil {
  1666  		t.Fatal(err)
  1667  	}
  1668  	accounts, slots := 0, 0
  1669  	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
  1670  	for accIt.Next() {
  1671  		var acc struct {
  1672  			Nonce    uint64
  1673  			Balance  *big.Int
  1674  			Root     common.Hash
  1675  			CodeHash []byte
  1676  		}
  1677  		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
  1678  			log.Crit("Invalid account encountered during snapshot creation", "err", err)
  1679  		}
  1680  		accounts++
  1681  		if acc.Root != emptyRoot {
  1682  			id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
  1683  			storeTrie, err := trie.NewStateTrie(id, triedb)
  1684  			if err != nil {
  1685  				t.Fatal(err)
  1686  			}
  1687  			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
  1688  			for storeIt.Next() {
  1689  				slots++
  1690  			}
  1691  			if err := storeIt.Err; err != nil {
  1692  				t.Fatal(err)
  1693  			}
  1694  		}
  1695  	}
  1696  	if err := accIt.Err; err != nil {
  1697  		t.Fatal(err)
  1698  	}
  1699  	t.Logf("accounts: %d, slots: %d", accounts, slots)
  1700  }
  1701  
  1702  // TestSyncAccountPerformance tests how efficient the snap algorithm is at
  1703  // minimizing state healing.
  1704  func TestSyncAccountPerformance(t *testing.T) {
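        	// Note: this test mutates the package-level accountConcurrency, so it does
        	// not call t.Parallel().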
  1705  	// Set the account concurrency to 1. This _should_ result in the range
  1706  	// root becoming correct, and no healing should be needed.
  1707  	defer func(old int) { accountConcurrency = old }(accountConcurrency)
  1708  	accountConcurrency = 1
  1709  
  1710  	var (
  1711  		once   sync.Once
  1712  		cancel = make(chan struct{})
  1713  		term   = func() {
  1714  			once.Do(func() {
  1715  				close(cancel)
  1716  			})
  1717  		}
  1718  	)
  1719  	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
  1720  
  1721  	mkSource := func(name string) *testPeer {
  1722  		source := newTestPeer(name, t, term)
  1723  		source.accountTrie = sourceAccountTrie.Copy()
  1724  		source.accountValues = elems
  1725  		return source
  1726  	}
  1727  	src := mkSource("source")
  1728  	syncer := setupSyncer(nodeScheme, src)
  1729  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
  1730  		t.Fatalf("sync failed: %v", err)
  1731  	}
  1732  	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
  1733  	// The trie root will always be requested, since it is added when the snap
  1734  	// sync cycle starts. When popping the queue, we do not look it up again.
  1735  	// Doing so would bring this number down to zero in this artificial testcase,
  1736  	// but only add extra IO for no reason in practice.
  1737  	if have, want := src.nTrienodeRequests, 1; have != want {
  1738  		fmt.Print(src.Stats())
  1739  		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
  1740  	}
  1741  }
  1742  
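        // TestSlotEstimation exercises estimateRemainingSlots, which extrapolates how
        // many storage slots remain in a trie from the number of slots already
        // delivered and the portion of the hash space they cover.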
  1743  func TestSlotEstimation(t *testing.T) {
  1744  	for i, tc := range []struct {
  1745  		last  common.Hash
  1746  		count int
  1747  		want  uint64
  1748  	}{
  1749  		{
  1750  			// Half the space
  1751  			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1752  			100,
  1753  			100,
  1754  		},
  1755  		{
  1756  			// 1 / 16th
  1757  			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
  1758  			100,
  1759  			1500,
  1760  		},
  1761  		{
  1762  			// Bit more than 1 / 16th
  1763  			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
  1764  			100,
  1765  			1499,
  1766  		},
  1767  		{
  1768  			// Almost everything
  1769  			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
  1770  			100,
  1771  			6,
  1772  		},
  1773  		{
  1774  			// Almost nothing -- should lead to error
  1775  			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
  1776  			1,
  1777  			0,
  1778  		},
  1779  		{
  1780  			// Nothing -- should lead to error
  1781  			common.Hash{},
  1782  			100,
  1783  			0,
  1784  		},
  1785  	} {
  1786  		have, _ := estimateRemainingSlots(tc.count, tc.last)
  1787  		if want := tc.want; have != want {
  1788  			t.Errorf("test %d: have %d want %d", i, have, want)
  1789  		}
  1790  	}
  1791  }