github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/core/state/sync_test.go

//  Copyright 2018 The go-ethereum Authors
//  Copyright 2019 The go-aigar Authors
//  This file is part of the go-aigar library.
//
//  The go-aigar library is free software: you can redistribute it and/or modify
//  it under the terms of the GNU Lesser General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  The go-aigar library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//  GNU Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public License
//  along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"bytes"
	"math/big"
	"testing"

	"github.com/AigarNetwork/aigar/common"
	"github.com/AigarNetwork/aigar/core/rawdb"
	"github.com/AigarNetwork/aigar/crypto"
	"github.com/AigarNetwork/aigar/ethdb"
	"github.com/AigarNetwork/aigar/ethdb/memorydb"
	"github.com/AigarNetwork/aigar/trie"
)

// testAccount is the data associated with an account used by the state tests.
type testAccount struct {
	address common.Address
	balance *big.Int
	nonce   uint64
	code    []byte
}

// makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState() (Database, common.Hash, []*testAccount) {
	// Create an empty state
	db := NewDatabase(rawdb.NewMemoryDatabase())
	state, _ := New(common.Hash{}, db)

	// Fill it with some arbitrary data
	accounts := []*testAccount{}
	for i := byte(0); i < 96; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		obj.AddBalance(big.NewInt(int64(11 * i)))
		acc.balance = big.NewInt(int64(11 * i))

		obj.SetNonce(uint64(42 * i))
		acc.nonce = uint64(42 * i)

		if i%3 == 0 {
			obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
		state.updateStateObject(obj)
		accounts = append(accounts, acc)
	}
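	// Commit the modified accounts and derive the final state root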
	root, _ := state.Commit(false)

	// Return the generated state
	return db, root, accounts
}

// checkStateAccounts cross-references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
	// Check root availability and state contents
	state, err := New(root, NewDatabase(db))
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	if err := checkStateConsistency(db, root); err != nil {
		t.Fatalf("inconsistent state trie at %x: %v", root, err)
	}
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}
	}
}

// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
	if v, _ := db.Get(root[:]); v == nil {
		return nil // Consider a non-existent state consistent.
	}
	trie, err := trie.New(root, trie.NewDatabase(db))
	if err != nil {
		return err
	}
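	// Walk the entire (sub-)trie; any node missing from the database surfaces as an iterator error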
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db ethdb.Database, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.Get(root.Bytes()); err != nil {
		return nil // Consider a non-existent state consistent.
	}
	state, err := New(root, NewDatabase(db))
	if err != nil {
		return err
	}
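	// Iterate the entire state; missing trie nodes or contract code surface as an iterator error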
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}

// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
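	// Root hash of the empty trie: Keccak256 of the RLP encoding of an empty byte string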
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	if req := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New())).Missing(1); len(req) != 0 {
		t.Errorf("content requested for empty state: %v", req)
	}
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := append([]common.Hash{}, sched.Missing(count)...)
	for len(queue) > 0 {
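		// Retrieve the data for every currently requested node from the source trie database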
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
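		// Feed the retrieved results back and queue new tasks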
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[:0], sched.Missing(count)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others are sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := append([]common.Hash{}, sched.Missing(0)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
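		// Drop the processed half of the queue and append any newly revealed nodes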
		queue = append(queue[len(results):], sched.Missing(0)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(count) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(count) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even in random order), and the others are sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(0) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
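		// Merge any newly exposed nodes into the still-pending queue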
		for _, hash := range sched.Missing(0) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	added := []common.Hash{}
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
	checkSubtries:
		for _, hash := range added {
			for _, acc := range srcAccounts {
				if hash == crypto.Keccak256Hash(acc.code) {
					continue checkSubtries // skip trie check of code nodes.
				}
			}
			// Can't use checkStateConsistency here because subtrie keys may have odd
			// length and crash in LeafKey.
			if err := checkTrieConsistency(dstDb, hash); err != nil {
				t.Fatalf("state inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := dstDb.Get(key)

		dstDb.Delete(key)
		if err := checkStateConsistency(dstDb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
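		// Restore the node so the next iteration starts from a complete database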
		dstDb.Put(key, value)
	}
}