github.com/neatio-net/neatio@v1.7.3-0.20231114194659-f4d7a2226baa/chain/core/state/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"bytes"
	"math/big"
	"testing"

	"github.com/neatio-net/neatio/chain/core/rawdb"
	"github.com/neatio-net/neatio/chain/trie"
	"github.com/neatio-net/neatio/neatdb"
	"github.com/neatio-net/neatio/utilities/common"
	"github.com/neatio-net/neatio/utilities/crypto"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address
	balance *big.Int
	nonce   uint64
	code    []byte
}

// makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState() (Database, common.Hash, []*testAccount) {
	// Create an empty state
	diskdb := rawdb.NewMemoryDatabase()
	db := NewDatabase(diskdb)
	state, _ := New(common.Hash{}, db)

	// Fill it with some arbitrary data
	accounts := []*testAccount{}
	for i := byte(0); i < 96; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		if i%3 == 0 {
			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
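		// Write the dirty object into the state trie so the commit below covers it.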
		state.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := state.Commit(false)

	// Return the generated state
	return db, root, accounts
}

// checkStateAccounts cross-references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, db neatdb.Database, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(db))
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(db, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}
	}
}

// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
func checkTrieConsistency(db neatdb.Database, root common.Hash) error {
	if v, _ := db.Get(root[:]); v == nil {
		return nil // Consider a non-existent state consistent.
	}
	trie, err := trie.New(root, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	it := trie.NodeIterator(nil)
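	// Exhaust the iterator: every node must be resolved from db along the way,
	// so a missing node surfaces as an iterator error below.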
	for it.Next(true) {
	}
	return it.Error()
}

// checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db neatdb.Database, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.Get(root.Bytes()); err != nil {
		return nil // Consider a non-existent state consistent.
	}
	state, err := New(root, NewDatabase(db))
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
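	// Walk the whole state, including storage tries and contract code; any
	// missing piece of data is reported through the iterator's Error field.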
	for it.Next() {
	}
	return it.Error
}

// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
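	// The hash below is the well-known root of the empty trie: keccak256(rlp("")).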
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	db := rawdb.NewMemoryDatabase()
	if req := NewStateSync(empty, db).Missing(1); len(req) != 0 {
		t.Errorf("content requested for empty state: %v", req)
	}
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(batch)...)
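	// Keep requesting missing nodes and feeding the retrieved data back until
	// the scheduler has nothing left to ask for.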
	for len(queue) > 0 {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross-check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, with the remainder delivered only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(0)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
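		// Requeue the tasks we withheld this round, together with any newly
		// revealed ones.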
		queue = append(queue[len(results):], sched.Missing(0)...)
	}
	// Cross-check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, albeit in a
// random order.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb)

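	// A map yields iteration in unspecified (Go-randomized) order, which
	// provides the shuffling this test relies on.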
	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross-check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (in random order), with the remainder delivered
// only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(0) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, in random order
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

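			// Deliver roughly half of the pending nodes, leaving the rest
			// queued for a later round.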
			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, hash := range sched.Missing(0) {
			queue[hash] = struct{}{}
		}
	}
	// Cross-check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

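	// Sanity check: the freshly generated source state must itself be consistent.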
	checkTrieConsistency(srcDb.TrieDB().DiskDB().(neatdb.Database), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	added := []common.Hash{}
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(dstDb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
	checkSubtries:
		for _, hash := range added {
			for _, acc := range srcAccounts {
				if hash == crypto.Keccak256Hash(acc.code) {
					continue checkSubtries // skip trie check of code nodes.
				}
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected
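	// (The first added entry, the state root, is skipped: deleting it would make
	// the state look merely non-existent, which checkStateConsistency accepts.)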
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := dstDb.Get(key)

		dstDb.Delete(key)
		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		dstDb.Put(key, value)
	}
}