github.com/klaytn/klaytn@v1.12.1/blockchain/state/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from core/state/sync_test.go (2020/05/20).
// Modified and improved for the klaytn development.

package state

import (
	"bytes"
	"errors"
	"math/big"
	"testing"

	"github.com/alecthomas/units"
	lru "github.com/hashicorp/golang-lru"
	"github.com/klaytn/klaytn/blockchain/types/account"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/crypto"
	"github.com/klaytn/klaytn/rlp"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/klaytn/klaytn/storage/statedb"
	"github.com/stretchr/testify/assert"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address    common.Address
	balance    *big.Int
	nonce      uint64
	code       []byte
	storageMap map[common.Hash]common.Hash
}

// makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState(t *testing.T) (Database, common.Hash, []*testAccount) {
	// Create an empty state
	db := NewDatabase(database.NewMemoryDBManager())
	statedb, err := New(common.Hash{}, db, nil, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Fill it with some arbitrary data
	var accounts []*testAccount
	for i := byte(0); i < 96; i++ {
		var obj *stateObject
		acc := &testAccount{
			address:    common.BytesToAddress([]byte{i}),
			storageMap: make(map[common.Hash]common.Hash),
		}

		if i%3 > 0 {
			obj = statedb.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		} else {
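			// Every third account (i%3 == 0) is a smart contract carrying
			// code and, for most indices, a few storage slots.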
			obj = statedb.GetOrNewSmartContract(common.BytesToAddress([]byte{i}))

			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
			if i == 0 {
				// to test emptyCodeHash
				obj.SetCode(crypto.Keccak256Hash([]byte{}), []byte{})
				acc.code = []byte{}
			}

			for j := 0; j < int(i)%10; j++ {
				key := common.Hash{i + byte(j)}
				value := common.Hash{i*2 + 1}
				acc.storageMap[key] = value

				obj.SetState(db, key, value)
			}
		}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		statedb.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := statedb.Commit(false)

	if err := checkStateConsistency(db.TrieDB().DiskDB(), root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}

	// Return the generated state
	return db, root, accounts
}

// checkStateAccounts cross-references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, newDB database.DBManager, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(newDB), nil, nil)
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(newDB, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}

		// check storage trie
		st := state.StorageTrie(acc.address)
		it := statedb.NewIterator(st.NodeIterator(nil))
		storageMapWithHashedKey := make(map[common.Hash]common.Hash)
		for it.Next() {
			storageMapWithHashedKey[common.BytesToHash(it.Key)] = common.BytesToHash(it.Value)
		}
		if len(storageMapWithHashedKey) != len(acc.storageMap) {
			t.Errorf("account %d: storage entry count mismatch: have %d, want %d", i, len(storageMapWithHashedKey), len(acc.storageMap))
		}
		for key, value := range acc.storageMap {
			hk := crypto.Keccak256Hash(key[:])
			if storageMapWithHashedKey[hk] != value {
				t.Errorf("account %d: storage slot (%v) mismatch: have %x, want %x", i, key.String(), storageMapWithHashedKey[hk], value)
			}
		}
	}
}
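
// secureStorageKey is a minimal sketch (a hypothetical helper, not used by
// the tests): storage tries are secure tries keyed by the Keccak256 hash of
// the raw slot key, which is why checkStateAccounts hashes each expected key
// before comparing it against the iterated trie contents.
func secureStorageKey(key common.Hash) common.Hash {
	return crypto.Keccak256Hash(key[:])
}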

// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
func checkTrieConsistency(db database.DBManager, root common.Hash) error {
	if v, _ := db.ReadTrieNode(root.ExtendZero()); v == nil { // only works with hash32
		return nil // Consider a non-existent state consistent.
	}
	trie, err := statedb.NewTrie(root, statedb.NewDatabase(db), nil)
	if err != nil {
		return err
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// checkStateConsistency checks that all data of a state root is present.
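// Unlike checkTrieConsistency, which walks a single trie, it descends into
// storage tries and contract code as well, via the state NodeIterator.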
func checkStateConsistency(db database.DBManager, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.ReadTrieNode(root.ExtendZero()); err != nil { // only works with hash32
		return nil // Consider a non-existent state consistent.
	}
	state, err := New(root, NewDatabase(db), nil, nil)
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}

// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// only bloom
	{
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, statedb.NewSyncBloom(1, db.GetMemDB()), nil, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}

	// only lru
	{
		lruCache, _ := lru.New(int(1 * units.MB / common.HashLength))
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, statedb.NewSyncBloom(1, db.GetMemDB()), lruCache, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}

	// neither bloom nor lru
	{
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, nil, nil, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}

	// both bloom and lru
	{
		bloom := statedb.NewSyncBloom(1, database.NewMemDB())
		lruCache, _ := lru.New(int(1 * units.MB / common.HashLength))
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, bloom, lruCache, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}
}
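
// emptyTrieRoot is a minimal sketch (a hypothetical helper, not used by the
// tests) showing where the hard-coded `empty` hash above comes from: it is
// the Keccak256 hash of the RLP encoding of an empty string (a single 0x80
// byte), i.e. the root hash of an empty merkle trie.
func emptyTrieRoot() common.Hash {
	// == 56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421
	return crypto.Keccak256Hash([]byte{0x80})
}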

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
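// Each sync round in testIterativeStateSync below plays the network's role:
// Missing schedules outstanding items, the test fetches them from the source
// state, Process verifies and queues each result, and Commit persists the
// completed nodes to the destination database.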
func TestIterativeStateSyncIndividual(t *testing.T) {
	testIterativeStateSync(t, 1, false, false)
}

func TestIterativeStateSyncBatched(t *testing.T) {
	testIterativeStateSync(t, 100, false, false)
}

func TestIterativeStateSyncIndividualFromDisk(t *testing.T) {
	testIterativeStateSync(t, 1, true, false)
}

func TestIterativeStateSyncBatchedFromDisk(t *testing.T) {
	testIterativeStateSync(t, 100, true, false)
}

func TestIterativeStateSyncIndividualByPath(t *testing.T) {
	testIterativeStateSync(t, 1, false, true)
}

func TestIterativeStateSyncBatchedByPath(t *testing.T) {
	testIterativeStateSync(t, 100, false, true)
}

func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	if commit {
		srcState.TrieDB().Commit(srcRoot, false, 0)
	}
	srcTrie, _ := statedb.NewTrie(srcRoot, srcState.TrieDB(), nil)

	// Create a destination state and sync with the scheduler
	dstDiskDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDiskDb)
	sched := NewStateSync(srcRoot, dstDiskDb, statedb.NewSyncBloom(1, dstDiskDb.GetMemDB()), nil, nil)

	nodes, paths, codes := sched.Missing(count)
	var (
		hashQueue []common.Hash
		pathQueue []statedb.SyncPath
	)
	if !bypath {
		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
	} else {
		hashQueue = append(hashQueue[:0], codes...)
		pathQueue = append(pathQueue[:0], paths...)
	}
	for len(hashQueue)+len(pathQueue) > 0 {
		results := make([]statedb.SyncResult, len(hashQueue)+len(pathQueue))
		for i, hash := range hashQueue {
			data, err := srcState.TrieDB().Node(hash.ExtendZero())
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = statedb.SyncResult{Hash: hash, Data: data}
		}
		for i, path := range pathQueue {
			if len(path) == 1 {
				data, _, err := srcTrie.TryGetNode(path[0])
				if err != nil {
					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
				}
				results[len(hashQueue)+i] = statedb.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
			} else {
				serializer := account.NewAccountSerializer()
				if err := rlp.DecodeBytes(srcTrie.Get(path[0]), serializer); err != nil {
					t.Fatalf("failed to decode account on path %x: %v", path, err)
				}
				acc := serializer.GetAccount()
				pacc := account.GetProgramAccount(acc)
				if pacc == nil {
					t.Fatalf("failed to get contract")
				}
				stTrie, err := statedb.NewStorageTrie(pacc.GetStorageRoot(), srcState.TrieDB(), nil)
				if err != nil {
					t.Fatalf("failed to retrieve storage trie for path %x: %v", path, err)
				}
				data, _, err := stTrie.TryGetNode(path[1])
				if err != nil {
					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
				}
				results[len(hashQueue)+i] = statedb.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
			}
		}
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDiskDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		nodes, paths, codes = sched.Missing(count)
		if !bypath {
			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
		} else {
			hashQueue = append(hashQueue[:0], codes...)
			pathQueue = append(pathQueue[:0], paths...)
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDiskDb, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	// Test with quit channel
	quit := make(chan struct{})

	// normal
	err = CheckStateConsistency(srcState, dstState, srcRoot, 100, quit)
	assert.NoError(t, err)

	// quit
	close(quit)
	err = CheckStateConsistency(srcState, dstState, srcRoot, 100, quit)
	assert.ErrorIs(t, err, errStopByQuit)
}

func TestCheckStateConsistencyMissNode(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	newState, newRoot, _ := makeTestState(t)
	// commit stateTrie to DB
	srcState.TrieDB().Commit(srcRoot, false, 0)
	newState.TrieDB().Commit(newRoot, false, 0)

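	// isCode reports whether the hash is a contract code hash.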
	isCode := func(hash common.Hash) bool {
		for _, acc := range srcAccounts {
			if hash == crypto.Keccak256Hash(acc.code) {
				return true
			}
		}
		return false
	}

	srcStateDB, err := New(srcRoot, srcState, nil, nil)
	assert.NoError(t, err)

	it := NewNodeIterator(srcStateDB)
	it.Next() // skip trie root node

	for it.Next() {
		if !common.EmptyHash(it.Hash) {
			var (
				codehash = it.Hash
				nodehash = it.Hash.ExtendZero()
				data     []byte
				code     = isCode(it.Hash)
				err      error
			)
			srcDiskDB := srcState.TrieDB().DiskDB()
			newDiskDB := newState.TrieDB().DiskDB()
			// Delete trie nodes or codes
			if code {
				data = srcDiskDB.ReadCode(codehash)
				srcState.DeleteCode(codehash)
				newState.DeleteCode(codehash)
			} else {
				data, _ = srcDiskDB.ReadTrieNode(nodehash)
				srcDiskDB.DeleteTrieNode(nodehash)
				newDiskDB.DeleteTrieNode(nodehash)
			}
			// Check consistency : errIterator
			err = CheckStateConsistency(srcState, newState, srcRoot, 100, nil)
			if !errors.Is(err, errIterator) {
				t.Fatalf("mismatched err: have %v, want %v", err, errIterator)
			}

			// Recover nodes
			if code {
				srcDiskDB.WriteCode(codehash, data)
				newDiskDB.WriteCode(codehash, data)
			} else {
				srcDiskDB.WriteTrieNode(nodehash, data)
				newDiskDB.WriteTrieNode(nodehash, data)
			}
		}
	}

	// Check consistency : no error
	err = CheckStateConsistency(srcState, newState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, newState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// Create a destination state and sync with the scheduler
	dstDiskDB := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDiskDB)
	sched := NewStateSync(srcRoot, dstDiskDB, statedb.NewSyncBloom(1, dstDiskDB.GetMemDB()), nil, nil)

	nodes, _, codes := sched.Missing(0)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]statedb.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcState.TrieDB().Node(hash.ExtendZero())
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = statedb.SyncResult{Hash: hash, Data: data}
		}
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDiskDB.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		nodes, _, codes := sched.Missing(0)
		queue = append(append(queue[len(results):], nodes...), codes...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDiskDB, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
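// The map-based queue below relies on Go's randomized map iteration order to
// supply the randomness.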
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// Create a destination state and sync with the scheduler
	dstDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDb)
	sched := NewStateSync(srcRoot, dstDb, statedb.NewSyncBloom(1, dstDb.GetMemDB()), nil, nil)

	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(count)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]statedb.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcState.TrieDB().Node(hash.ExtendZero())
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, statedb.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = make(map[common.Hash]struct{})
		nodes, _, codes := sched.Missing(0)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that the trie scheduler can correctly reconstruct the state even if
// only partial results are returned (even those in random order), and the
// others are sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// Create a destination state and sync with the scheduler
	dstDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDb)
	sched := NewStateSync(srcRoot, dstDb, statedb.NewSyncBloom(1, dstDb.GetMemDB()), nil, nil)

	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(0)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]statedb.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcState.TrieDB().Node(hash.ExtendZero())
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, statedb.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Hash)
		}
		nodes, _, codes = sched.Missing(0)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
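// After each commit round, every sub-trie rooted at a node received so far
// must be either fully present in the destination database or absent entirely.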
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// isCode reports whether the hash is a contract code hash.
	isCode := func(hash common.Hash) bool {
		for _, acc := range srcAccounts {
			if hash == crypto.Keccak256Hash(acc.code) {
				return true
			}
		}
		return false
	}
	checkTrieConsistency(srcState.TrieDB().DiskDB(), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDb)
	sched := NewStateSync(srcRoot, dstDb, statedb.NewSyncBloom(1, dstDb.GetMemDB()), nil, nil)

	var added []common.Hash

	nodes, _, codes := sched.Missing(1)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]statedb.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcState.TrieDB().Node(hash.ExtendZero())
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = statedb.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
		for _, hash := range added {
			if isCode(hash) {
				continue
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		nodes, _, codes = sched.Missing(1)
		queue = append(append(queue[:0], nodes...), codes...)
	}
	// Sanity check that removing any node from the database is detected
	for _, hash := range added[1:] {
		var (
			key      = hash.Bytes()
			code     = isCode(hash)
			val      []byte
			codehash = hash
			nodehash = hash.ExtendZero()
		)
		if code {
			val = dstDb.ReadCode(codehash)
			dstState.DeleteCode(codehash)
		} else {
			val, _ = dstDb.ReadTrieNode(nodehash)
			dstDb.DeleteTrieNode(nodehash)
		}

		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}

		err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
		assert.Error(t, err)

		err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
		assert.Error(t, err)

		if code {
			dstDb.WriteCode(codehash, val)
		} else {
			dstDb.WriteTrieNode(nodehash, val)
		}
	}

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}