github.com/DxChainNetwork/dxc@v0.8.1-0.20220824085222-1162e304b6e7/core/state/sync_test.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package state
    18  
    19  import (
    20  	"bytes"
    21  	"math/big"
    22  	"testing"
    23  
    24  	"github.com/DxChainNetwork/dxc/common"
    25  	"github.com/DxChainNetwork/dxc/core/rawdb"
    26  	"github.com/DxChainNetwork/dxc/crypto"
    27  	"github.com/DxChainNetwork/dxc/ethdb"
    28  	"github.com/DxChainNetwork/dxc/ethdb/memorydb"
    29  	"github.com/DxChainNetwork/dxc/rlp"
    30  	"github.com/DxChainNetwork/dxc/trie"
    31  )
    32  
// testAccount is the data associated with an account used by the state tests.
// It records the values written into the source state so a reconstructed copy
// can be cross-checked against them.
type testAccount struct {
	address common.Address // account address (derived from a single byte in makeTestState)
	balance *big.Int       // expected balance of the account
	nonce   uint64         // expected nonce of the account
	code    []byte         // expected contract code (nil for plain accounts)
}
    40  
// makeTestState create a sample test state to test node-wise reconstruction.
// It returns the backing state database, the committed state root and the
// list of accounts that were inserted, so callers can verify a synced copy.
func makeTestState() (Database, common.Hash, []*testAccount) {
	// Create an empty state
	db := NewDatabase(rawdb.NewMemoryDatabase())
	state, _ := New(common.Hash{}, db, nil)

	// Fill it with some arbitrary data
	var accounts []*testAccount
	for i := byte(0); i < 96; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		// Note: 11*i and 42*i are evaluated in byte arithmetic and wrap mod 256
		// for larger i. This is harmless here because the state object and the
		// expectation record both use the very same wrapped value.
		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		// Every third account is a contract with a small unique code blob
		if i%3 == 0 {
			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
		// Every fifth account additionally gets a few storage slots
		if i%5 == 0 {
			for j := byte(0); j < 5; j++ {
				hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
				obj.SetState(db, hash, hash)
			}
		}
		state.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := state.Commit(false)

	// Return the generated state
	return db, root, accounts
}
    77  
// checkStateAccounts cross references a reconstructed state with an expected
// account array. A missing or inconsistent state trie aborts the test; any
// per-account mismatch in balance, nonce or code is reported as a test error.
func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(db), nil)
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(db, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	// Compare every synced account against its expectation record
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}
	}
}
   101  
   102  // checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
   103  func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
   104  	if v, _ := db.Get(root[:]); v == nil {
   105  		return nil // Consider a non existent state consistent.
   106  	}
   107  	trie, err := trie.New(root, trie.NewDatabase(db))
   108  	if err != nil {
   109  		return err
   110  	}
   111  	it := trie.NodeIterator(nil)
   112  	for it.Next(true) {
   113  	}
   114  	return it.Error()
   115  }
   116  
   117  // checkStateConsistency checks that all data of a state root is present.
   118  func checkStateConsistency(db ethdb.Database, root common.Hash) error {
   119  	// Create and iterate a state trie rooted in a sub-node
   120  	if _, err := db.Get(root.Bytes()); err != nil {
   121  		return nil // Consider a non existent state consistent.
   122  	}
   123  	state, err := New(root, NewDatabase(db), nil)
   124  	if err != nil {
   125  		return err
   126  	}
   127  	it := NewNodeIterator(state)
   128  	for it.Next() {
   129  	}
   130  	return it.Error
   131  }
   132  
   133  // Tests that an empty state is not scheduled for syncing.
   134  func TestEmptyStateSync(t *testing.T) {
   135  	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
   136  	sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil)
   137  	if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
   138  		t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes)
   139  	}
   140  }
   141  
// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) {
	testIterativeStateSync(t, 1, false, false)
}

// Same as above, but requesting 100 retrieval tasks per round.
func TestIterativeStateSyncBatched(t *testing.T) {
	testIterativeStateSync(t, 100, false, false)
}

// Same as the individual variant, but with the source trie committed to disk first.
func TestIterativeStateSyncIndividualFromDisk(t *testing.T) {
	testIterativeStateSync(t, 1, true, false)
}

// Same as the batched variant, but with the source trie committed to disk first.
func TestIterativeStateSyncBatchedFromDisk(t *testing.T) {
	testIterativeStateSync(t, 100, true, false)
}

// Same as the individual variant, but retrieving trie nodes by path instead of hash.
func TestIterativeStateSyncIndividualByPath(t *testing.T) {
	testIterativeStateSync(t, 1, false, true)
}

// Same as the batched variant, but retrieving trie nodes by path instead of hash.
func TestIterativeStateSyncBatchedByPath(t *testing.T) {
	testIterativeStateSync(t, 100, false, true)
}
   162  
   163  func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
   164  	// Create a random state to copy
   165  	srcDb, srcRoot, srcAccounts := makeTestState()
   166  	if commit {
   167  		srcDb.TrieDB().Commit(srcRoot, false, nil)
   168  	}
   169  	srcTrie, _ := trie.New(srcRoot, srcDb.TrieDB())
   170  
   171  	// Create a destination state and sync with the scheduler
   172  	dstDb := rawdb.NewMemoryDatabase()
   173  	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
   174  
   175  	nodes, paths, codes := sched.Missing(count)
   176  	var (
   177  		hashQueue []common.Hash
   178  		pathQueue []trie.SyncPath
   179  	)
   180  	if !bypath {
   181  		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
   182  	} else {
   183  		hashQueue = append(hashQueue[:0], codes...)
   184  		pathQueue = append(pathQueue[:0], paths...)
   185  	}
   186  	for len(hashQueue)+len(pathQueue) > 0 {
   187  		results := make([]trie.SyncResult, len(hashQueue)+len(pathQueue))
   188  		for i, hash := range hashQueue {
   189  			data, err := srcDb.TrieDB().Node(hash)
   190  			if err != nil {
   191  				data, err = srcDb.ContractCode(common.Hash{}, hash)
   192  			}
   193  			if err != nil {
   194  				t.Fatalf("failed to retrieve node data for hash %x", hash)
   195  			}
   196  			results[i] = trie.SyncResult{Hash: hash, Data: data}
   197  		}
   198  		for i, path := range pathQueue {
   199  			if len(path) == 1 {
   200  				data, _, err := srcTrie.TryGetNode(path[0])
   201  				if err != nil {
   202  					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
   203  				}
   204  				results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
   205  			} else {
   206  				var acc Account
   207  				if err := rlp.DecodeBytes(srcTrie.Get(path[0]), &acc); err != nil {
   208  					t.Fatalf("failed to decode account on path %x: %v", path, err)
   209  				}
   210  				stTrie, err := trie.New(acc.Root, srcDb.TrieDB())
   211  				if err != nil {
   212  					t.Fatalf("failed to retriev storage trie for path %x: %v", path, err)
   213  				}
   214  				data, _, err := stTrie.TryGetNode(path[1])
   215  				if err != nil {
   216  					t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
   217  				}
   218  				results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
   219  			}
   220  		}
   221  		for _, result := range results {
   222  			if err := sched.Process(result); err != nil {
   223  				t.Errorf("failed to process result %v", err)
   224  			}
   225  		}
   226  		batch := dstDb.NewBatch()
   227  		if err := sched.Commit(batch); err != nil {
   228  			t.Fatalf("failed to commit data: %v", err)
   229  		}
   230  		batch.Write()
   231  
   232  		nodes, paths, codes = sched.Missing(count)
   233  		if !bypath {
   234  			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
   235  		} else {
   236  			hashQueue = append(hashQueue[:0], codes...)
   237  			pathQueue = append(pathQueue[:0], paths...)
   238  		}
   239  	}
   240  	// Cross check that the two states are in sync
   241  	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
   242  }
   243  
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)

	nodes, _, codes := sched.Missing(0)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			// The hash may denote either a trie node or contract code
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				data, err = srcDb.ContractCode(common.Hash{}, hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Feed the delivered half back and persist whatever completed
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		// Keep the undelivered half queued and append the newly scheduled tasks
		nodes, _, codes = sched.Missing(0)
		queue = append(append(queue[len(results):], nodes...), codes...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
   287  
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order. The individual variant requests one task per round, the
// batched variant 100.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }
   293  
// testIterativeRandomStateSync syncs a freshly generated source state into an
// empty destination, delivering the requested entries in randomized order
// (via Go's random map iteration), with `count` tasks requested per round.
func testIterativeRandomStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)

	// A map is used as the queue so iteration (delivery) order is randomized
	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(count)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			// The hash may denote either a trie node or contract code
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				data, err = srcDb.ContractCode(common.Hash{}, hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		// Rebuild the queue from scratch with the next round of tasks
		queue = make(map[common.Hash]struct{})
		nodes, _, codes = sched.Missing(count)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
   341  
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (Even those randomly), others sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)

	// A map is used as the queue so delivery order is randomized
	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(0)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			// Deleting during range is well-defined in Go: removed keys are
			// simply not revisited by the remaining iteration
			delete(queue, hash)

			// The hash may denote either a trie node or contract code
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				data, err = srcDb.ContractCode(common.Hash{}, hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

			// Stop once half (plus one) of the queue has been delivered
			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		// Drop the delivered entries (no-op for keys already removed above)
		for _, result := range results {
			delete(queue, result.Hash)
		}
		nodes, _, codes = sched.Missing(0)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
   398  
// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// isCodeLookup to save some hashing
	var isCode = make(map[common.Hash]struct{})
	for _, acc := range srcAccounts {
		if len(acc.code) > 0 {
			isCode[crypto.Keccak256Hash(acc.code)] = struct{}{}
		}
	}
	isCode[common.BytesToHash(emptyCodeHash)] = struct{}{}
	// Pre-flight walk of the source trie (return value deliberately ignored)
	checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)

	// added records every hash written to dstDb, in write order; added[0] is
	// used as the state root in the removal checks below
	var added []common.Hash

	nodes, _, codes := sched.Missing(1)
	queue := append(append([]common.Hash{}, nodes...), codes...)

	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			// The hash may denote either a trie node or contract code
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				data, err = srcDb.ContractCode(common.Hash{}, hash)
			}
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
			// Check that all known sub-tries added so far are complete or missing entirely.
			if _, ok := isCode[result.Hash]; ok {
				continue // code blobs are not trie roots; nothing to walk
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, result.Hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		nodes, _, codes = sched.Missing(1)
		queue = append(append(queue[:0], nodes...), codes...)
	}
	// Sanity check that removing any node from the database is detected. The
	// root (added[0]) is skipped: deleting it would make the whole state look
	// non-existent, which checkStateConsistency treats as consistent, so the
	// removal could never be caught.
	for _, node := range added[1:] {
		var (
			key     = node.Bytes()
			_, code = isCode[node]
			val     []byte
		)
		// Remember the entry's value so it can be restored afterwards
		if code {
			val = rawdb.ReadCode(dstDb, node)
			rawdb.DeleteCode(dstDb, node)
		} else {
			val = rawdb.ReadTrieNode(dstDb, node)
			rawdb.DeleteTrieNode(dstDb, node)
		}
		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		// Restore the entry so later iterations see a complete state again
		if code {
			rawdb.WriteCode(dstDb, node, val)
		} else {
			rawdb.WriteTrieNode(dstDb, node, val)
		}
	}
}