github.com/klaytn/klaytn@v1.10.2/blockchain/state/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from core/state/sync_test.go (2020/05/20).
// Modified and improved for the klaytn development.

package state

import (
	"bytes"
	"errors"
	"math/big"
	"testing"

	"github.com/alecthomas/units"
	lru "github.com/hashicorp/golang-lru"
	"github.com/klaytn/klaytn/blockchain/types/account"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/crypto"
	"github.com/klaytn/klaytn/rlp"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/klaytn/klaytn/storage/statedb"
	"github.com/stretchr/testify/assert"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address    common.Address
	balance    *big.Int
	nonce      uint64
	code       []byte
	storageMap map[common.Hash]common.Hash
}
// makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState(t *testing.T) (Database, common.Hash, []*testAccount) {
	// Create an empty state
	db := NewDatabase(database.NewMemoryDBManager())
	statedb, err := New(common.Hash{}, db, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Fill it with some arbitrary data
	var accounts []*testAccount
	for i := byte(0); i < 96; i++ {
		var obj *stateObject
		acc := &testAccount{
			address:    common.BytesToAddress([]byte{i}),
			storageMap: make(map[common.Hash]common.Hash),
		}

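		// Every third account (i%3 == 0) becomes a smart contract with code
		// and storage; the remaining accounts are plain externally owned
		// accounts.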
		if i%3 > 0 {
			obj = statedb.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		} else {
			obj = statedb.GetOrNewSmartContract(common.BytesToAddress([]byte{i}))

			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
			if i == 0 {
				// to test emptyCodeHash
				obj.SetCode(crypto.Keccak256Hash([]byte{}), []byte{})
				acc.code = []byte{}
			}

			for j := 0; j < int(i)%10; j++ {
				key := common.Hash{i + byte(j)}
				value := common.Hash{i*2 + 1}
				acc.storageMap[key] = value

				obj.SetState(db, key, value)
			}
		}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		statedb.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := statedb.Commit(false)

	if err := checkStateConsistency(db.TrieDB().DiskDB(), root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}

	// Return the generated state
	return db, root, accounts
}

// checkStateAccounts cross-references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, newDB database.DBManager, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(newDB), nil)
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(newDB, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}

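		// The storage trie stores Keccak256-hashed keys, so the expected keys
		// are hashed before the comparison below.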
		// check storage trie
		st := state.StorageTrie(acc.address)
		it := statedb.NewIterator(st.NodeIterator(nil))
		storageMapWithHashedKey := make(map[common.Hash]common.Hash)
		for it.Next() {
			storageMapWithHashedKey[common.BytesToHash(it.Key)] = common.BytesToHash(it.Value)
		}
		if len(storageMapWithHashedKey) != len(acc.storageMap) {
			t.Errorf("account %d: storage entry count mismatch: have %d, want %d", i, len(storageMapWithHashedKey), len(acc.storageMap))
		}
		for key, value := range acc.storageMap {
			hk := crypto.Keccak256Hash(key[:])
			if storageMapWithHashedKey[hk] != value {
				t.Errorf("account %d: storage value (%v) mismatch: have %x, want %x", i, key.String(), storageMapWithHashedKey[hk], value)
			}
		}
	}
}

// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
func checkTrieConsistency(db database.DBManager, root common.Hash) error {
	if v, _ := db.ReadStateTrieNode(root[:]); v == nil {
		return nil // Consider a non-existent state consistent.
	}
	trie, err := statedb.NewTrie(root, statedb.NewDatabase(db))
	if err != nil {
		return err
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db database.DBManager, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.ReadStateTrieNode(root.Bytes()); err != nil {
		return nil // Consider a non-existent state consistent.
	}
	state, err := New(root, NewDatabase(db), nil)
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}

// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
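	// The well-known root hash of an empty trie: Keccak256(RLP("")).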
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// only bloom
	{
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, statedb.NewSyncBloom(1, db.GetMemDB()), nil, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}

	// only lru
	{
		lruCache, _ := lru.New(int(1 * units.MB / common.HashLength))
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, statedb.NewSyncBloom(1, db.GetMemDB()), lruCache, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}

	// neither bloom nor lru
	{
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, nil, nil, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}

	// both bloom and lru
	{
		bloom := statedb.NewSyncBloom(1, database.NewMemDB())
		lruCache, _ := lru.New(int(1 * units.MB / common.HashLength))
		db := database.NewMemoryDBManager()
		sync := NewStateSync(empty, db, bloom, lruCache, nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("content requested for empty state: %v", sync)
		}
	}
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) {
	testIterativeStateSync(t, 1, false, false)
}

func TestIterativeStateSyncBatched(t *testing.T) {
	testIterativeStateSync(t, 100, false, false)
}

func TestIterativeStateSyncIndividualFromDisk(t *testing.T) {
	testIterativeStateSync(t, 1, true, false)
}

func TestIterativeStateSyncBatchedFromDisk(t *testing.T) {
	testIterativeStateSync(t, 100, true, false)
}

func TestIterativeStateSyncIndividualByPath(t *testing.T) {
	testIterativeStateSync(t, 1, false, true)
}

func TestIterativeStateSyncBatchedByPath(t *testing.T) {
	testIterativeStateSync(t, 100, false, true)
}

func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	if commit {
		srcState.TrieDB().Commit(srcRoot, false, 0)
	}
	srcTrie, _ := statedb.NewTrie(srcRoot, srcState.TrieDB())

	// Create a destination state and sync with the scheduler
	dstDiskDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDiskDb)
	sched := NewStateSync(srcRoot, dstDiskDb, statedb.NewSyncBloom(1, dstDiskDb.GetMemDB()), nil, nil)

	nodes, paths, codes := sched.Missing(count)
	var (
		hashQueue []common.Hash
		pathQueue []statedb.SyncPath
	)
	if !bypath {
		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
	} else {
		hashQueue = append(hashQueue[:0], codes...)
		pathQueue = append(pathQueue[:0], paths...)
	}
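	// Keep fetching the data the scheduler reports missing, feeding it back
	// and committing, until nothing remains to be retrieved.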
	for len(hashQueue)+len(pathQueue) > 0 {
		results := make([]statedb.SyncResult, len(hashQueue)+len(pathQueue))
		for i, hash := range hashQueue {
			data, err := srcState.TrieDB().Node(hash)
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = statedb.SyncResult{Hash: hash, Data: data}
		}
		for i, path := range pathQueue {
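			// A one-element path addresses a node in the account trie; a
			// two-element path is [account key, storage-trie node path].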
			if len(path) == 1 {
				data, _, err := srcTrie.TryGetNode(path[0])
				if err != nil {
					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
				}
				results[len(hashQueue)+i] = statedb.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
			} else {
				serializer := account.NewAccountSerializer()
				if err := rlp.DecodeBytes(srcTrie.Get(path[0]), serializer); err != nil {
					t.Fatalf("failed to decode account on path %x: %v", path, err)
				}
				acc := serializer.GetAccount()
				pacc := account.GetProgramAccount(acc)
				if pacc == nil {
					t.Fatalf("failed to get program account on path %x", path)
				}
				stTrie, err := statedb.NewTrie(pacc.GetStorageRoot(), srcState.TrieDB())
				if err != nil {
					t.Fatalf("failed to retrieve storage trie for path %x: %v", path, err)
				}
				data, _, err := stTrie.TryGetNode(path[1])
				if err != nil {
					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
				}
				results[len(hashQueue)+i] = statedb.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
			}
		}
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDiskDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		nodes, paths, codes = sched.Missing(count)
		if !bypath {
			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
		} else {
			hashQueue = append(hashQueue[:0], codes...)
			pathQueue = append(pathQueue[:0], paths...)
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDiskDb, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	// Test with quit channel
	quit := make(chan struct{})

	// normal
	err = CheckStateConsistency(srcState, dstState, srcRoot, 100, quit)
	assert.NoError(t, err)

	// a closed quit channel must abort the check
	close(quit)
	err = CheckStateConsistency(srcState, dstState, srcRoot, 100, quit)
	assert.ErrorIs(t, err, errStopByQuit)
}

func TestCheckStateConsistencyMissNode(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	newState, newRoot, _ := makeTestState(t)
	// commit stateTrie to DB
	srcState.TrieDB().Commit(srcRoot, false, 0)
	newState.TrieDB().Commit(newRoot, false, 0)

	isCode := func(hash common.Hash) bool {
		for _, acc := range srcAccounts {
			if hash == crypto.Keccak256Hash(acc.code) {
				return true
			}
		}
		return false
	}

	srcStateDB, err := New(srcRoot, srcState, nil)
	assert.NoError(t, err)

	it := NewNodeIterator(srcStateDB)
	it.Next() // skip trie root node

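	// Walk every node of the source state; for each one, delete it from both
	// databases, verify that the consistency check reports errIterator, then
	// restore it.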
	for it.Next() {
		if !common.EmptyHash(it.Hash) {
			hash := it.Hash
			var (
				data []byte
				code = isCode(hash)
				err  error
			)
			srcDiskDB := srcState.TrieDB().DiskDB()
			newDiskDB := newState.TrieDB().DiskDB()
			// Delete trie nodes or codes
			if code {
				data = srcDiskDB.ReadCode(hash)
				srcState.DeleteCode(hash)
				newState.DeleteCode(hash)
			} else {
				data, _ = srcDiskDB.ReadCachedTrieNode(hash)
				srcDiskDB.GetMemDB().Delete(hash[:])
				newDiskDB.GetMemDB().Delete(hash[:])
			}
			// Check consistency: errIterator
			err = CheckStateConsistency(srcState, newState, srcRoot, 100, nil)
			if !errors.Is(err, errIterator) {
				t.Log("mismatched err", "err", err, "expErr", errIterator)
				t.FailNow()
			}

			// Recover nodes
			srcDiskDB.GetMemDB().Put(hash[:], data)
			newDiskDB.GetMemDB().Put(hash[:], data)
		}
	}

	// Check consistency: no error
	err = CheckStateConsistency(srcState, newState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, newState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// Create a destination state and sync with the scheduler
	dstDiskDB := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDiskDB)
	sched := NewStateSync(srcRoot, dstDiskDB, statedb.NewSyncBloom(1, dstDiskDB.GetMemDB()), nil, nil)

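	// Missing(0) is assumed to place no cap on the batch, returning every
	// currently-known missing item at once.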
	nodes, _, codes := sched.Missing(0)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]statedb.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcState.TrieDB().Node(hash)
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = statedb.SyncResult{Hash: hash, Data: data}
		}
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDiskDB.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		nodes, _, codes := sched.Missing(0)
		queue = append(append(queue[len(results):], nodes...), codes...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDiskDB, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// Create a destination state and sync with the scheduler
	dstDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDb)
	sched := NewStateSync(srcRoot, dstDb, statedb.NewSyncBloom(1, dstDb.GetMemDB()), nil, nil)

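	// Using a map as the queue randomizes iteration order, and therefore the
	// order in which results are delivered back to the scheduler.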
	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(count)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]statedb.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcState.TrieDB().Node(hash)
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, statedb.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = make(map[common.Hash]struct{})
		nodes, _, codes := sched.Missing(0)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even those in random order), and the others are
// sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// Create a destination state and sync with the scheduler
	dstDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDb)
	sched := NewStateSync(srcRoot, dstDb, statedb.NewSyncBloom(1, dstDb.GetMemDB()), nil, nil)

	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(0)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]statedb.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcState.TrieDB().Node(hash)
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, statedb.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Hash)
		}
		nodes, _, codes = sched.Missing(0)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcState, srcRoot, srcAccounts := makeTestState(t)
	srcState.TrieDB().Commit(srcRoot, false, 0)

	// isCode reports whether the hash is a contract code hash.
	isCode := func(hash common.Hash) bool {
		for _, acc := range srcAccounts {
			if hash == crypto.Keccak256Hash(acc.code) {
				return true
			}
		}
		return false
	}
	checkTrieConsistency(srcState.TrieDB().DiskDB().(database.DBManager), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb := database.NewMemoryDBManager()
	dstState := NewDatabase(dstDb)
	sched := NewStateSync(srcRoot, dstDb, statedb.NewSyncBloom(1, dstDb.GetMemDB()), nil, nil)

	var added []common.Hash

	nodes, _, codes := sched.Missing(1)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]statedb.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcState.TrieDB().Node(hash)
			if err != nil {
				data, err = srcState.ContractCode(hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = statedb.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dstDb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
		for _, hash := range added {
			if isCode(hash) {
				continue
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		nodes, _, codes = sched.Missing(1)
		queue = append(append(queue[:0], nodes...), codes...)
	}
	// Sanity check that removing any node from the database is detected
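	// added[0] is the state root and is skipped here: deleting the root makes
	// the state look non-existent, which checkStateConsistency deliberately
	// treats as consistent.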
	for _, node := range added[1:] {
		var (
			key  = node.Bytes()
			code = isCode(node)
			val  []byte
		)
		if code {
			val = dstDb.ReadCode(node)
			dstState.DeleteCode(node)
		} else {
			val, _ = dstDb.ReadCachedTrieNode(node)
			dstDb.GetMemDB().Delete(node[:])
		}

		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}

		err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
		assert.Error(t, err)

		err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
		assert.Error(t, err)
		if code {
			dstDb.WriteCode(node, val)
		} else {
			// insert the trie node back into the memory database
			dstDb.GetMemDB().Put(node[:], val)
		}
	}

	err := CheckStateConsistency(srcState, dstState, srcRoot, 100, nil)
	assert.NoError(t, err)

	err = CheckStateConsistencyParallel(srcState, dstState, srcRoot, nil)
	assert.NoError(t, err)
}