github.com/juliankolbe/go-ethereum@v1.9.992/eth/protocols/snap/sync_test.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/rand"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"math/big"
    25  	"sort"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/juliankolbe/go-ethereum/common"
    30  	"github.com/juliankolbe/go-ethereum/core/rawdb"
    31  	"github.com/juliankolbe/go-ethereum/core/state"
    32  	"github.com/juliankolbe/go-ethereum/crypto"
    33  	"github.com/juliankolbe/go-ethereum/light"
    34  	"github.com/juliankolbe/go-ethereum/log"
    35  	"github.com/juliankolbe/go-ethereum/rlp"
    36  	"github.com/juliankolbe/go-ethereum/trie"
    37  	"golang.org/x/crypto/sha3"
    38  )
    39  
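         // TestHashing verifies that hashing via the crypto.KeccakState Read path
         // produces the same digests as the classic Sum(nil) path.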
    40  func TestHashing(t *testing.T) {
    41  	t.Parallel()
    42  
    43  	var bytecodes = make([][]byte, 10)
    44  	for i := 0; i < len(bytecodes); i++ {
    45  		buf := make([]byte, 100)
    46  		rand.Read(buf)
    47  		bytecodes[i] = buf
    48  	}
    49  	var want, got string
    50  	var old = func() {
    51  		hasher := sha3.NewLegacyKeccak256()
    52  		for i := 0; i < len(bytecodes); i++ {
    53  			hasher.Reset()
    54  			hasher.Write(bytecodes[i])
    55  			hash := hasher.Sum(nil)
    56  			got = fmt.Sprintf("%v\n%v", got, hash)
    57  		}
    58  	}
    59  	var new = func() {
    60  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    61  		var hash = make([]byte, 32)
    62  		for i := 0; i < len(bytecodes); i++ {
    63  			hasher.Reset()
    64  			hasher.Write(bytecodes[i])
    65  			hasher.Read(hash)
    66  			want = fmt.Sprintf("%v\n%v", want, hash)
    67  		}
    68  	}
    69  	old()
    70  	new()
    71  	if want != got {
    72  		t.Errorf("want\n%v\ngot\n%v\n", want, got)
    73  	}
    74  }
    75  
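         // BenchmarkHashing compares the Sum(nil) based hashing against the
         // KeccakState Read variant, reporting allocations for both.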
    76  func BenchmarkHashing(b *testing.B) {
    77  	var bytecodes = make([][]byte, 10000)
    78  	for i := 0; i < len(bytecodes); i++ {
    79  		buf := make([]byte, 100)
    80  		rand.Read(buf)
    81  		bytecodes[i] = buf
    82  	}
    83  	var old = func() {
    84  		hasher := sha3.NewLegacyKeccak256()
    85  		for i := 0; i < len(bytecodes); i++ {
    86  			hasher.Reset()
    87  			hasher.Write(bytecodes[i])
    88  			hasher.Sum(nil)
    89  		}
    90  	}
    91  	var new = func() {
    92  		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
    93  		var hash = make([]byte, 32)
    94  		for i := 0; i < len(bytecodes); i++ {
    95  			hasher.Reset()
    96  			hasher.Write(bytecodes[i])
    97  			hasher.Read(hash)
    98  		}
    99  	}
   100  	b.Run("old", func(b *testing.B) {
   101  		b.ReportAllocs()
   102  		for i := 0; i < b.N; i++ {
   103  			old()
   104  		}
   105  	})
   106  	b.Run("new", func(b *testing.B) {
   107  		b.ReportAllocs()
   108  		for i := 0; i < b.N; i++ {
   109  			new()
   110  		}
   111  	})
   112  }
   113  
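         // Handler prototypes used by testPeer to serve, or deliberately mishandle,
         // the various snap protocol requests.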
   114  type storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
   115  type accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error
   116  type trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
   117  type codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
   118  
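         // testPeer is an in-memory mock of a snap peer: it answers account, storage,
         // trie node and bytecode requests from locally built tries, via pluggable
         // handler functions.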
   119  type testPeer struct {
   120  	id            string
   121  	test          *testing.T
   122  	remote        *Syncer
   123  	logger        log.Logger
   124  	accountTrie   *trie.Trie
   125  	accountValues entrySlice
   126  	storageTries  map[common.Hash]*trie.Trie
   127  	storageValues map[common.Hash]entrySlice
   128  
   129  	accountRequestHandler accountHandlerFunc
   130  	storageRequestHandler storageHandlerFunc
   131  	trieRequestHandler    trieHandlerFunc
   132  	codeRequestHandler    codeHandlerFunc
   133  	cancelCh              chan struct{}
   134  }
   135  
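         // newTestPeer creates a test peer wired up with the default, well-behaving
         // request handlers.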
   136  func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer {
   137  	peer := &testPeer{
   138  		id:                    id,
   139  		test:                  t,
   140  		logger:                log.New("id", id),
   141  		accountRequestHandler: defaultAccountRequestHandler,
   142  		trieRequestHandler:    defaultTrieRequestHandler,
   143  		storageRequestHandler: defaultStorageRequestHandler,
   144  		codeRequestHandler:    defaultCodeRequestHandler,
   145  		cancelCh:              cancelCh,
   146  	}
   147  	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
   148  	//peer.logger.SetHandler(stderrHandler)
   149  	return peer
   150  
   151  }
   152  
   153  func (t *testPeer) ID() string      { return t.id }
   154  func (t *testPeer) Log() log.Logger { return t.logger }
   155  
   156  func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
   157  	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
   158  	go t.accountRequestHandler(t, id, root, origin, bytes)
   159  	return nil
   160  }
   161  
   162  func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
   163  	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
   164  	go t.trieRequestHandler(t, id, root, paths, bytes)
   165  	return nil
   166  }
   167  
   168  func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
   169  	if len(accounts) == 1 && origin != nil {
   170  		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
   171  	} else {
   172  		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
   173  	}
   174  	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
   175  	return nil
   176  }
   177  
   178  func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
   179  	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
   180  	go t.codeRequestHandler(t, id, hashes, bytes)
   181  	return nil
   182  }
   183  
   184  // defaultTrieRequestHandler is a well-behaving handler for trie healing requests
   185  func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   186  	// Pass the response
   187  	var nodes [][]byte
   188  	for _, pathset := range paths {
   189  		switch len(pathset) {
   190  		case 1:
   191  			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
   192  			if err != nil {
   193  				t.logger.Info("Error handling req", "error", err)
   194  				break
   195  			}
   196  			nodes = append(nodes, blob)
   197  		default:
   198  			account := t.storageTries[(common.BytesToHash(pathset[0]))]
   199  			for _, path := range pathset[1:] {
   200  				blob, _, err := account.TryGetNode(path)
   201  				if err != nil {
   202  					t.logger.Info("Error handling req", "error", err)
   203  					break
   204  				}
   205  				nodes = append(nodes, blob)
   206  			}
   207  		}
   208  	}
   209  	t.remote.OnTrieNodes(t, requestId, nodes)
   210  	return nil
   211  }
   212  
   213  // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
   214  func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, cap uint64) error {
   215  	keys, vals, proofs := createAccountRequestResponse(t, root, origin, cap)
   216  	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
   217  		t.logger.Error("remote error on delivery", "error", err)
   218  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   219  		t.remote.Unregister(t.id)
   220  		close(t.cancelCh)
   221  		return err
   222  	}
   223  	return nil
   224  }
   225  
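         // createAccountRequestResponse packs accounts from origin onwards until the
         // byte cap is exceeded, and attaches boundary proofs for the origin and the
         // last returned key.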
   226  func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
   227  	var size uint64
   228  	for _, entry := range t.accountValues {
   229  		if size > cap {
   230  			break
   231  		}
   232  		if bytes.Compare(origin[:], entry.k) <= 0 {
   233  			keys = append(keys, common.BytesToHash(entry.k))
   234  			vals = append(vals, entry.v)
   235  			size += uint64(32 + len(entry.v))
   236  		}
   237  	}
   238  	// Unless we send the entire trie, we need to supply proofs
    239  // Actually, we need to supply proofs either way! This seems to be an implementation
   240  	// quirk in go-ethereum
   241  	proof := light.NewNodeSet()
   242  	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   243  		t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   244  			"error", err)
   245  	}
   246  	if len(keys) > 0 {
   247  		lastK := (keys[len(keys)-1])[:]
   248  		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
   249  			t.logger.Error("Could not prove last item",
   250  				"error", err)
   251  		}
   252  	}
   253  	for _, blob := range proof.NodeList() {
   254  		proofs = append(proofs, blob)
   255  	}
   256  	return keys, vals, proofs
   257  }
   258  
   259  // defaultStorageRequestHandler is a well-behaving storage request handler
   260  func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
   261  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
   262  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   263  		t.logger.Error("remote error on delivery", "error", err)
   264  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   265  		close(t.cancelCh)
   266  	}
   267  	return nil
   268  }
   269  
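         // defaultCodeRequestHandler is a well-behaving handler for bytecode requests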
   270  func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   271  	var bytecodes [][]byte
   272  	for _, h := range hashes {
   273  		bytecodes = append(bytecodes, getCode(h))
   274  	}
   275  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   276  		t.logger.Error("remote error on delivery", "error", err)
   277  		t.test.Errorf("Remote side rejected our delivery: %v", err)
   278  		close(t.cancelCh)
   279  	}
   280  	return nil
   281  }
   282  
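         // createStorageRequestResponse packs storage slots per account and adds
         // boundary proofs for any account whose range had to be cut short.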
   283  func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
   284  	var (
   285  		size  uint64
   286  		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   287  	)
   288  	if len(bLimit) > 0 {
   289  		limit = common.BytesToHash(bLimit)
   290  	}
   291  	var origin common.Hash
   292  	if len(bOrigin) > 0 {
   293  		origin = common.BytesToHash(bOrigin)
   294  	}
   295  
   296  	var limitExceeded bool
   297  	var incomplete bool
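         	// Pack slots account by account until either the byte budget (max) or the
         	// limit hash is exceeded; a cut-off range is marked incomplete and proven below.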
   298  	for _, account := range accounts {
   299  
   300  		var keys []common.Hash
   301  		var vals [][]byte
   302  		for _, entry := range t.storageValues[account] {
   303  			if limitExceeded {
   304  				incomplete = true
   305  				break
   306  			}
   307  			if bytes.Compare(entry.k, origin[:]) < 0 {
   308  				incomplete = true
   309  				continue
   310  			}
   311  			keys = append(keys, common.BytesToHash(entry.k))
   312  			vals = append(vals, entry.v)
   313  			size += uint64(32 + len(entry.v))
   314  			if bytes.Compare(entry.k, limit[:]) >= 0 {
   315  				limitExceeded = true
   316  			}
   317  			if size > max {
   318  				limitExceeded = true
   319  			}
   320  		}
   321  		hashes = append(hashes, keys)
   322  		slots = append(slots, vals)
   323  
   324  		if incomplete {
   325  			// If we're aborting, we need to prove the first and last item
   326  			// This terminates the response (and thus the loop)
   327  			proof := light.NewNodeSet()
   328  			stTrie := t.storageTries[account]
   329  
   330  			// Here's a potential gotcha: when constructing the proof, we cannot
   331  			// use the 'origin' slice directly, but must use the full 32-byte
   332  			// hash form.
   333  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   334  				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
   335  					"error", err)
   336  			}
   337  			if len(keys) > 0 {
   338  				lastK := (keys[len(keys)-1])[:]
   339  				if err := stTrie.Prove(lastK, 0, proof); err != nil {
   340  					t.logger.Error("Could not prove last item", "error", err)
   341  				}
   342  			}
   343  			for _, blob := range proof.NodeList() {
   344  				proofs = append(proofs, blob)
   345  			}
   346  			break
   347  		}
   348  	}
   349  	return hashes, slots, proofs
   350  }
   351  
    352  // emptyRequestAccountRangeFn rejects AccountRangeRequests by replying with an empty response
   353  func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   354  	var proofs [][]byte
   355  	var keys []common.Hash
   356  	var vals [][]byte
   357  	t.remote.OnAccounts(t, requestId, keys, vals, proofs)
   358  	return nil
   359  }
   360  
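         // nonResponsiveRequestAccountRangeFn ignores account range requests entirely,
         // forcing the syncer to time out.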
   361  func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   362  	return nil
   363  }
   364  
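         // emptyTrieRequestHandler answers trie node heal requests with an empty node set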
   365  func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   366  	var nodes [][]byte
   367  	t.remote.OnTrieNodes(t, requestId, nodes)
   368  	return nil
   369  }
   370  
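         // nonResponsiveTrieRequestHandler ignores trie node heal requests entirely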
   371  func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
   372  	return nil
   373  }
   374  
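         // emptyStorageRequestHandler answers storage range requests with empty hashes, slots and proofs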
   375  func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   376  	var hashes [][]common.Hash
   377  	var slots [][][]byte
   378  	var proofs [][]byte
   379  	t.remote.OnStorage(t, requestId, hashes, slots, proofs)
   380  	return nil
   381  }
   382  
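         // nonResponsiveStorageRequestHandler ignores storage range requests entirely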
   383  func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   384  	return nil
   385  }
   386  
   387  //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   388  //	var bytecodes [][]byte
   389  //	t.remote.OnByteCodes(t, id, bytecodes)
   390  //	return nil
   391  //}
   392  
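         // corruptCodeRequestHandler delivers the requested hashes themselves instead of
         // the corresponding bytecodes.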
   393  func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   394  	var bytecodes [][]byte
   395  	for _, h := range hashes {
   396  		// Send back the hashes
   397  		bytecodes = append(bytecodes, h[:])
   398  	}
   399  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   400  		t.logger.Error("remote error on delivery", "error", err)
   401  		// Mimic the real-life handler, which drops a peer on errors
   402  		t.remote.Unregister(t.id)
   403  	}
   404  	return nil
   405  }
   406  
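         // cappedCodeRequestHandler only ever delivers the bytecode for the first requested hash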
   407  func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   408  	var bytecodes [][]byte
   409  	for _, h := range hashes[:1] {
   410  		bytecodes = append(bytecodes, getCode(h))
   411  	}
   412  	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
   413  		t.logger.Error("remote error on delivery", "error", err)
   414  		// Mimic the real-life handler, which drops a peer on errors
   415  		t.remote.Unregister(t.id)
   416  	}
   417  	return nil
   418  }
   419  
    420  // starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to be very small
   421  func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   422  	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
   423  }
   424  
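         // starvingAccountRequestHandler serves account range requests, but with a tiny 500 byte response cap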
   425  func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   426  	return defaultAccountRequestHandler(t, requestId, root, origin, 500)
   427  }
   428  
   429  //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   430  //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
   431  //}
   432  
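         // corruptAccountRequestHandler builds a valid response but drops the first proof
         // node, so the delivery fails verification on the remote side.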
   433  func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   434  	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, cap)
   435  	if len(proofs) > 0 {
   436  		proofs = proofs[1:]
   437  	}
   438  	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
   439  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   440  		// Mimic the real-life handler, which drops a peer on errors
   441  		t.remote.Unregister(t.id)
   442  	}
   443  	return nil
   444  }
   445  
   446  // corruptStorageRequestHandler doesn't provide good proofs
   447  func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   448  	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   449  	if len(proofs) > 0 {
   450  		proofs = proofs[1:]
   451  	}
   452  	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
   453  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   454  		// Mimic the real-life handler, which drops a peer on errors
   455  		t.remote.Unregister(t.id)
   456  	}
   457  	return nil
   458  }
   459  
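         // noProofStorageRequestHandler delivers storage ranges without any accompanying proofs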
   460  func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
   461  	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
   462  	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
   463  		t.logger.Info("remote error on delivery (as expected)", "error", err)
   464  		// Mimic the real-life handler, which drops a peer on errors
   465  		t.remote.Unregister(t.id)
   466  	}
   467  	return nil
   468  }
   469  
   470  // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
   471  // also ship the entire trie inside the proof. If the attack is successful,
   472  // the remote side does not do any follow-up requests
   473  func TestSyncBloatedProof(t *testing.T) {
   474  	t.Parallel()
   475  
   476  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   477  	cancel := make(chan struct{})
   478  	source := newTestPeer("source", t, cancel)
   479  	source.accountTrie = sourceAccountTrie
   480  	source.accountValues = elems
   481  
   482  	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
   483  		var proofs [][]byte
   484  		var keys []common.Hash
   485  		var vals [][]byte
   486  
   487  		// The values
   488  		for _, entry := range t.accountValues {
   489  			if bytes.Compare(origin[:], entry.k) <= 0 {
   490  				keys = append(keys, common.BytesToHash(entry.k))
   491  				vals = append(vals, entry.v)
   492  			}
   493  		}
   494  		// The proofs
   495  		proof := light.NewNodeSet()
   496  		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
   497  			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
   498  		}
   499  		// The bloat: add proof of every single element
   500  		for _, entry := range t.accountValues {
   501  			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
   502  				t.logger.Error("Could not prove item", "error", err)
   503  			}
   504  		}
   505  		// And remove one item from the elements
   506  		if len(keys) > 2 {
   507  			keys = append(keys[:1], keys[2:]...)
   508  			vals = append(vals[:1], vals[2:]...)
   509  		}
   510  		for _, blob := range proof.NodeList() {
   511  			proofs = append(proofs, blob)
   512  		}
   513  		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
   514  			t.logger.Info("remote error on delivery", "error", err)
    515  			// This is actually correct; signal to exit the test successfully
   516  			close(t.cancelCh)
   517  		}
   518  		return nil
   519  	}
   520  	syncer := setupSyncer(source)
   521  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
   522  		t.Fatal("No error returned from incomplete/cancelled sync")
   523  	}
   524  }
   525  
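         // setupSyncer creates a Syncer backed by a fresh in-memory database and
         // registers the given test peers with it.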
   526  func setupSyncer(peers ...*testPeer) *Syncer {
   527  	stateDb := rawdb.NewMemoryDatabase()
   528  	syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb))
   529  	for _, peer := range peers {
   530  		syncer.Register(peer)
   531  		peer.remote = syncer
   532  	}
   533  	return syncer
   534  }
   535  
   536  // TestSync tests a basic sync with one peer
   537  func TestSync(t *testing.T) {
   538  	t.Parallel()
   539  
   540  	cancel := make(chan struct{})
   541  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   542  
   543  	mkSource := func(name string) *testPeer {
   544  		source := newTestPeer(name, t, cancel)
   545  		source.accountTrie = sourceAccountTrie
   546  		source.accountValues = elems
   547  		return source
   548  	}
   549  
   550  	syncer := setupSyncer(mkSource("sourceA"))
   551  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   552  		t.Fatalf("sync failed: %v", err)
   553  	}
   554  }
   555  
   556  // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
   557  // panic within the prover
   558  func TestSyncTinyTriePanic(t *testing.T) {
   559  	t.Parallel()
   560  
   561  	cancel := make(chan struct{})
   562  
   563  	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
   564  
   565  	mkSource := func(name string) *testPeer {
   566  		source := newTestPeer(name, t, cancel)
   567  		source.accountTrie = sourceAccountTrie
   568  		source.accountValues = elems
   569  		return source
   570  	}
   571  
   572  	syncer := setupSyncer(
   573  		mkSource("nice-a"),
   574  	)
   575  	done := checkStall(t, cancel)
   576  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   577  		t.Fatalf("sync failed: %v", err)
   578  	}
   579  	close(done)
   580  }
   581  
   582  // TestMultiSync tests a basic sync with multiple peers
   583  func TestMultiSync(t *testing.T) {
   584  	t.Parallel()
   585  
   586  	cancel := make(chan struct{})
   587  	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
   588  
   589  	mkSource := func(name string) *testPeer {
   590  		source := newTestPeer(name, t, cancel)
   591  		source.accountTrie = sourceAccountTrie
   592  		source.accountValues = elems
   593  		return source
   594  	}
   595  
   596  	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
   597  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   598  		t.Fatalf("sync failed: %v", err)
   599  	}
   600  }
   601  
    602  // TestSyncWithStorage tests a basic sync using accounts + storage + code
   603  func TestSyncWithStorage(t *testing.T) {
   604  	t.Parallel()
   605  
   606  	cancel := make(chan struct{})
   607  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true)
   608  
   609  	mkSource := func(name string) *testPeer {
   610  		source := newTestPeer(name, t, cancel)
   611  		source.accountTrie = sourceAccountTrie
   612  		source.accountValues = elems
   613  		source.storageTries = storageTries
   614  		source.storageValues = storageElems
   615  		return source
   616  	}
   617  	syncer := setupSyncer(mkSource("sourceA"))
   618  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   619  		t.Fatalf("sync failed: %v", err)
   620  	}
   621  }
   622  
    623  // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
   624  func TestMultiSyncManyUseless(t *testing.T) {
   625  	t.Parallel()
   626  
   627  	cancel := make(chan struct{})
   628  
   629  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
   630  
   631  	mkSource := func(name string, a, b, c bool) *testPeer {
   632  		source := newTestPeer(name, t, cancel)
   633  		source.accountTrie = sourceAccountTrie
   634  		source.accountValues = elems
   635  		source.storageTries = storageTries
   636  		source.storageValues = storageElems
   637  
   638  		if !a {
   639  			source.accountRequestHandler = emptyRequestAccountRangeFn
   640  		}
   641  		if !b {
   642  			source.storageRequestHandler = emptyStorageRequestHandler
   643  		}
   644  		if !c {
   645  			source.trieRequestHandler = emptyTrieRequestHandler
   646  		}
   647  		return source
   648  	}
   649  
   650  	syncer := setupSyncer(
   651  		mkSource("full", true, true, true),
   652  		mkSource("noAccounts", false, true, true),
   653  		mkSource("noStorage", true, false, true),
   654  		mkSource("noTrie", true, true, false),
   655  	)
   656  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   657  		t.Fatalf("sync failed: %v", err)
   658  	}
   659  }
   660  
    661  // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
   662  func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
   663  	// We're setting the timeout to very low, to increase the chance of the timeout
   664  	// being triggered. This was previously a cause of panic, when a response
   665  	// arrived simultaneously as a timeout was triggered.
   666  	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
   667  	requestTimeout = time.Millisecond
   668  
   669  	cancel := make(chan struct{})
   670  
   671  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
   672  
   673  	mkSource := func(name string, a, b, c bool) *testPeer {
   674  		source := newTestPeer(name, t, cancel)
   675  		source.accountTrie = sourceAccountTrie
   676  		source.accountValues = elems
   677  		source.storageTries = storageTries
   678  		source.storageValues = storageElems
   679  
   680  		if !a {
   681  			source.accountRequestHandler = emptyRequestAccountRangeFn
   682  		}
   683  		if !b {
   684  			source.storageRequestHandler = emptyStorageRequestHandler
   685  		}
   686  		if !c {
   687  			source.trieRequestHandler = emptyTrieRequestHandler
   688  		}
   689  		return source
   690  	}
   691  
   692  	syncer := setupSyncer(
   693  		mkSource("full", true, true, true),
   694  		mkSource("noAccounts", false, true, true),
   695  		mkSource("noStorage", true, false, true),
   696  		mkSource("noTrie", true, true, false),
   697  	)
   698  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   699  		t.Fatalf("sync failed: %v", err)
   700  	}
   701  }
   702  
    703  // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
   704  func TestMultiSyncManyUnresponsive(t *testing.T) {
   705  	// We're setting the timeout to very low, to make the test run a bit faster
   706  	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
   707  	requestTimeout = time.Millisecond
   708  
   709  	cancel := make(chan struct{})
   710  
   711  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
   712  
   713  	mkSource := func(name string, a, b, c bool) *testPeer {
   714  		source := newTestPeer(name, t, cancel)
   715  		source.accountTrie = sourceAccountTrie
   716  		source.accountValues = elems
   717  		source.storageTries = storageTries
   718  		source.storageValues = storageElems
   719  
   720  		if !a {
   721  			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
   722  		}
   723  		if !b {
   724  			source.storageRequestHandler = nonResponsiveStorageRequestHandler
   725  		}
   726  		if !c {
   727  			source.trieRequestHandler = nonResponsiveTrieRequestHandler
   728  		}
   729  		return source
   730  	}
   731  
   732  	syncer := setupSyncer(
   733  		mkSource("full", true, true, true),
   734  		mkSource("noAccounts", false, true, true),
   735  		mkSource("noStorage", true, false, true),
   736  		mkSource("noTrie", true, true, false),
   737  	)
   738  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   739  		t.Fatalf("sync failed: %v", err)
   740  	}
   741  }
   742  
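         // checkStall arms a watchdog that aborts the sync (by closing cancel) if it
         // hasn't completed within a minute; closing the returned channel disarms it.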
   743  func checkStall(t *testing.T, cancel chan struct{}) chan struct{} {
   744  	testDone := make(chan struct{})
   745  	go func() {
   746  		select {
   747  		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
   748  			t.Log("Sync stalled")
   749  			close(cancel)
   750  		case <-testDone:
   751  			return
   752  		}
   753  	}()
   754  	return testDone
   755  }
   756  
   757  // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
   758  // consistently returning very small results
   759  func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
   760  	t.Parallel()
   761  
   762  	cancel := make(chan struct{})
   763  
   764  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   765  
   766  	mkSource := func(name string, slow bool) *testPeer {
   767  		source := newTestPeer(name, t, cancel)
   768  		source.accountTrie = sourceAccountTrie
   769  		source.accountValues = elems
   770  
   771  		if slow {
   772  			source.accountRequestHandler = starvingAccountRequestHandler
   773  		}
   774  		return source
   775  	}
   776  
   777  	syncer := setupSyncer(
   778  		mkSource("nice-a", false),
   779  		mkSource("nice-b", false),
   780  		mkSource("nice-c", false),
   781  		mkSource("capped", true),
   782  	)
   783  	done := checkStall(t, cancel)
   784  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   785  		t.Fatalf("sync failed: %v", err)
   786  	}
   787  	close(done)
   788  }
   789  
   790  // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
   791  // code requests properly.
   792  func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
   793  	t.Parallel()
   794  
   795  	cancel := make(chan struct{})
   796  
   797  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   798  
   799  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
   800  		source := newTestPeer(name, t, cancel)
   801  		source.accountTrie = sourceAccountTrie
   802  		source.accountValues = elems
   803  		source.codeRequestHandler = codeFn
   804  		return source
   805  	}
   806  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
    807  	// chance that the full set of codes requested is sent only to the
   808  	// non-corrupt peer, which delivers everything in one go, and makes the
   809  	// test moot
   810  	syncer := setupSyncer(
   811  		mkSource("capped", cappedCodeRequestHandler),
   812  		mkSource("corrupt", corruptCodeRequestHandler),
   813  	)
   814  	done := checkStall(t, cancel)
   815  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   816  		t.Fatalf("sync failed: %v", err)
   817  	}
   818  	close(done)
   819  }
   820  
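         // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which delivers
         // account ranges with broken proofs.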
   821  func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
   822  	t.Parallel()
   823  
   824  	cancel := make(chan struct{})
   825  
   826  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   827  
   828  	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
   829  		source := newTestPeer(name, t, cancel)
   830  		source.accountTrie = sourceAccountTrie
   831  		source.accountValues = elems
   832  		source.accountRequestHandler = accFn
   833  		return source
   834  	}
   835  	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
    836  	// chance that the full set of accounts requested is sent only to the
   837  	// non-corrupt peer, which delivers everything in one go, and makes the
   838  	// test moot
   839  	syncer := setupSyncer(
   840  		mkSource("capped", defaultAccountRequestHandler),
   841  		mkSource("corrupt", corruptAccountRequestHandler),
   842  	)
   843  	done := checkStall(t, cancel)
   844  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   845  		t.Fatalf("sync failed: %v", err)
   846  	}
   847  	close(done)
   848  }
   849  
    850  // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
    851  // only one at a time
   852  func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
   853  	t.Parallel()
   854  
   855  	cancel := make(chan struct{})
   856  
   857  	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
   858  
   859  	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
   860  		source := newTestPeer(name, t, cancel)
   861  		source.accountTrie = sourceAccountTrie
   862  		source.accountValues = elems
   863  		source.codeRequestHandler = codeFn
   864  		return source
   865  	}
    866  	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
    867  	// so with perfect deduplication it shouldn't be much more than that (see the check below)
   868  	var counter int
   869  	syncer := setupSyncer(
   870  		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
   871  			counter++
   872  			return cappedCodeRequestHandler(t, id, hashes, max)
   873  		}),
   874  	)
   875  	done := checkStall(t, cancel)
   876  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   877  		t.Fatalf("sync failed: %v", err)
   878  	}
   879  	close(done)
   880  	// There are only 8 unique hashes, and 3K accounts. However, the code
   881  	// deduplication is per request batch. If it were a perfect global dedup,
   882  	// we would expect only 8 requests. If there were no dedup, there would be
   883  	// 3k requests.
   884  	// We expect somewhere below 100 requests for these 8 unique hashes.
   885  	if threshold := 100; counter > threshold {
   886  		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
   887  	}
   888  }
   889  
   890  // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
   891  // consistently returning very small results
   892  func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
   893  	t.Parallel()
   894  
   895  	cancel := make(chan struct{})
   896  
   897  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false)
   898  
   899  	mkSource := func(name string, slow bool) *testPeer {
   900  		source := newTestPeer(name, t, cancel)
   901  		source.accountTrie = sourceAccountTrie
   902  		source.accountValues = elems
   903  		source.storageTries = storageTries
   904  		source.storageValues = storageElems
   905  
   906  		if slow {
   907  			source.storageRequestHandler = starvingStorageRequestHandler
   908  		}
   909  		return source
   910  	}
   911  
   912  	syncer := setupSyncer(
   913  		mkSource("nice-a", false),
   914  		mkSource("slow", true),
   915  	)
   916  	done := checkStall(t, cancel)
   917  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   918  		t.Fatalf("sync failed: %v", err)
   919  	}
   920  	close(done)
   921  }
   922  
   923  // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
   924  // sometimes sending bad proofs
   925  func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
   926  	t.Parallel()
   927  
   928  	cancel := make(chan struct{})
   929  
   930  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
   931  
   932  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
   933  		source := newTestPeer(name, t, cancel)
   934  		source.accountTrie = sourceAccountTrie
   935  		source.accountValues = elems
   936  		source.storageTries = storageTries
   937  		source.storageValues = storageElems
   938  		source.storageRequestHandler = handler
   939  		return source
   940  	}
   941  
   942  	syncer := setupSyncer(
   943  		mkSource("nice-a", defaultStorageRequestHandler),
   944  		mkSource("nice-b", defaultStorageRequestHandler),
   945  		mkSource("nice-c", defaultStorageRequestHandler),
   946  		mkSource("corrupt", corruptStorageRequestHandler),
   947  	)
   948  	done := checkStall(t, cancel)
   949  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   950  		t.Fatalf("sync failed: %v", err)
   951  	}
   952  	close(done)
   953  }
   954  
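         // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
         // where one peer omits the storage range proofs entirely.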
   955  func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
   956  	t.Parallel()
   957  
   958  	cancel := make(chan struct{})
   959  
   960  	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
   961  
   962  	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
   963  		source := newTestPeer(name, t, cancel)
   964  		source.accountTrie = sourceAccountTrie
   965  		source.accountValues = elems
   966  		source.storageTries = storageTries
   967  		source.storageValues = storageElems
   968  		source.storageRequestHandler = handler
   969  		return source
   970  	}
   971  
   972  	syncer := setupSyncer(
   973  		mkSource("nice-a", defaultStorageRequestHandler),
   974  		mkSource("nice-b", defaultStorageRequestHandler),
   975  		mkSource("nice-c", defaultStorageRequestHandler),
   976  		mkSource("corrupt", noProofStorageRequestHandler),
   977  	)
   978  	done := checkStall(t, cancel)
   979  	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
   980  		t.Fatalf("sync failed: %v", err)
   981  	}
   982  	close(done)
   983  }
   984  
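         // kv is a raw key/value pair used for building and checking the test tries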
   985  type kv struct {
   986  	k, v []byte
   987  	t    bool
   988  }
   989  
   990  // Some helpers for sorting
   991  type entrySlice []*kv
   992  
   993  func (p entrySlice) Len() int           { return len(p) }
   994  func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
   995  func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
   996  
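         // key32 returns a 32-byte key with the little-endian encoding of i in its first 8 bytes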
   997  func key32(i uint64) []byte {
   998  	key := make([]byte, 32)
   999  	binary.LittleEndian.PutUint64(key, i)
  1000  	return key
  1001  }
  1002  
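         // codehashes is the fixed pool of contract code hashes assigned to the test accounts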
  1003  var (
  1004  	codehashes = []common.Hash{
  1005  		crypto.Keccak256Hash([]byte{0}),
  1006  		crypto.Keccak256Hash([]byte{1}),
  1007  		crypto.Keccak256Hash([]byte{2}),
  1008  		crypto.Keccak256Hash([]byte{3}),
  1009  		crypto.Keccak256Hash([]byte{4}),
  1010  		crypto.Keccak256Hash([]byte{5}),
  1011  		crypto.Keccak256Hash([]byte{6}),
  1012  		crypto.Keccak256Hash([]byte{7}),
  1013  	}
  1014  )
  1015  
   1016  // getACodeHash returns one of a small fixed set of code hashes, selected deterministically by i
  1017  func getACodeHash(i uint64) []byte {
  1018  	h := codehashes[int(i)%len(codehashes)]
  1019  	return common.CopyBytes(h[:])
  1020  }
  1021  
   1022  // getCode is a convenience function to look up the code for a given code hash
  1023  func getCode(hash common.Hash) []byte {
  1024  	if hash == emptyCode {
  1025  		return nil
  1026  	}
  1027  	for i, h := range codehashes {
  1028  		if h == hash {
  1029  			return []byte{byte(i)}
  1030  		}
  1031  	}
  1032  	return nil
  1033  }
  1034  
   1035  // makeAccountTrieNoStorage spits out a trie, along with the leaves
  1036  func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
  1037  	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
  1038  	accTrie, _ := trie.New(common.Hash{}, db)
  1039  	var entries entrySlice
  1040  	for i := uint64(1); i <= uint64(n); i++ {
  1041  		value, _ := rlp.EncodeToBytes(state.Account{
  1042  			Nonce:    i,
  1043  			Balance:  big.NewInt(int64(i)),
  1044  			Root:     emptyRoot,
  1045  			CodeHash: getACodeHash(i),
  1046  		})
  1047  		key := key32(i)
  1048  		elem := &kv{key, value, false}
  1049  		accTrie.Update(elem.k, elem.v)
  1050  		entries = append(entries, elem)
  1051  	}
  1052  	sort.Sort(entries)
  1053  	// Push to disk layer
  1054  	accTrie.Commit(nil)
  1055  	return accTrie, entries
  1056  }
  1057  
   1058  // makeAccountTrieWithStorage spits out an account trie along with its leaves, plus per-account storage tries and their leaves
  1059  func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice,
  1060  	map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
  1061  
  1062  	var (
  1063  		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
  1064  		accTrie, _     = trie.New(common.Hash{}, db)
  1065  		entries        entrySlice
  1066  		storageTries   = make(map[common.Hash]*trie.Trie)
  1067  		storageEntries = make(map[common.Hash]entrySlice)
  1068  	)
  1069  
  1070  	// Make a storage trie which we reuse for the whole lot
  1071  	stTrie, stEntries := makeStorageTrie(slots, db)
  1072  	stRoot := stTrie.Hash()
  1073  	// Create n accounts in the trie
  1074  	for i := uint64(1); i <= uint64(accounts); i++ {
  1075  		key := key32(i)
  1076  		codehash := emptyCode[:]
  1077  		if code {
  1078  			codehash = getACodeHash(i)
  1079  		}
  1080  		value, _ := rlp.EncodeToBytes(state.Account{
  1081  			Nonce:    i,
  1082  			Balance:  big.NewInt(int64(i)),
  1083  			Root:     stRoot,
  1084  			CodeHash: codehash,
  1085  		})
  1086  		elem := &kv{key, value, false}
  1087  		accTrie.Update(elem.k, elem.v)
  1088  		entries = append(entries, elem)
  1089  		// we reuse the same one for all accounts
  1090  		storageTries[common.BytesToHash(key)] = stTrie
  1091  		storageEntries[common.BytesToHash(key)] = stEntries
  1092  	}
  1093  	sort.Sort(entries)
  1094  	stTrie.Commit(nil)
  1095  	accTrie.Commit(nil)
  1096  	return accTrie, entries, storageTries, storageEntries
  1097  }
  1098  
  1099  // makeStorageTrie fills a storage trie with n items, returning the
  1100  // not-yet-committed trie and the sorted entries
  1101  func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
  1102  	trie, _ := trie.New(common.Hash{}, db)
  1103  	var entries entrySlice
  1104  	for i := uint64(1); i <= uint64(n); i++ {
  1105  		// store 'i' at slot 'i'
  1106  		slotValue := key32(i)
  1107  		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
  1108  
  1109  		slotKey := key32(i)
  1110  		key := crypto.Keccak256Hash(slotKey[:])
  1111  
  1112  		elem := &kv{key[:], rlpSlotValue, false}
  1113  		trie.Update(elem.k, elem.v)
  1114  		entries = append(entries, elem)
  1115  	}
  1116  	sort.Sort(entries)
  1117  	return trie, entries
  1118  }