github.com/rayrapetyan/go-ethereum@v1.8.21/trie/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie() (*Database, *Trie, map[string][]byte) {
	// Create an empty trie
	triedb := NewDatabase(ethdb.NewMemDatabase())
	trie, _ := New(common.Hash{}, triedb)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
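	// Note: the loop below stops at 254 on purpose; with an upper bound of 256
	// a byte-typed counter would wrap around and never terminate.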
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.Update(key, val)
		}
	}
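	// Commit the trie so its nodes are collapsed into the trie database, from
	// where the sync tests below serve them via srcDb.Node.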
	trie.Commit(nil)

	// Return the generated trie
	return triedb, trie, content
}

// checkTrieContents cross-references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	trie, err := New(common.BytesToHash(root), db)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}

// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error {
	// Create and iterate a trie rooted in a subnode
	trie, err := New(root, db)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
	it := trie.NodeIterator(nil)
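	// Walking the entire trie forces every node to be resolved from the
	// database; a missing node surfaces as an iterator error.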
	for it.Next(true) {
	}
	return it.Error()
}

// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
	dbA := NewDatabase(ethdb.NewMemDatabase())
	dbB := NewDatabase(ethdb.NewMemDatabase())
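	// Both the zero hash and the canonical empty-root hash denote an empty
	// trie, so neither should yield any retrieval requests.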
	emptyA, _ := New(common.Hash{}, dbA)
	emptyB, _ := New(emptyRoot, dbB)

	for i, trie := range []*Trie{emptyA, emptyB} {
		if req := NewSync(trie.Hash(), ethdb.NewMemDatabase(), nil).Missing(1); len(req) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v", i, req)
		}
	}
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeSyncIndividual(t *testing.T) { testIterativeSync(t, 1) }
func TestIterativeSyncBatched(t *testing.T)    { testIterativeSync(t, 100) }

func testIterativeSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := ethdb.NewMemDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

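	// Each round pulls the currently missing node hashes, resolves them from
	// the source database, feeds them back via Process and persists them with
	// Commit, until the scheduler reports nothing left to fetch.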
	queue := append([]common.Hash{}, sched.Missing(batch)...)
	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, with the rest delivered only later.
func TestIterativeDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := ethdb.NewMemDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	queue := append([]common.Hash{}, sched.Missing(10000)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
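		// Keep the undelivered half of the queue and append whatever new
		// hashes the processed results have revealed.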
		queue = append(queue[len(results):], sched.Missing(10000)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, though in a
// random order.
func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) }
func TestIterativeRandomSyncBatched(t *testing.T)    { testIterativeRandomSync(t, 100) }

func testIterativeRandomSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := ethdb.NewMemDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

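	// Track the pending hashes in a map: Go's randomized map iteration order
	// is what delivers the results back in an unpredictable order.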
	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (and in random order), with the rest delivered
// only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := ethdb.NewMemDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(10000) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, and those in a random order
		results := make([]SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})

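			// Stop once half (plus one) of the pending nodes are gathered.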
			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, result := range results {
			delete(queue, result.Hash)
		}
		for _, hash := range sched.Missing(10000) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
}

// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced from multiple places in the trie.
func TestDuplicateAvoidanceSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := ethdb.NewMemDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

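	// A batch size of zero means no cap: every currently known missing hash is
	// returned at once.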
	queue := append([]common.Hash{}, sched.Missing(0)...)
	requested := make(map[common.Hash]struct{})

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			if _, ok := requested[hash]; ok {
				t.Errorf("hash %x already requested once", hash)
			}
			requested[hash] = struct{}{}

			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(0)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Root(), srcData)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, _ := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := ethdb.NewMemDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	added := []common.Hash{}
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of trie nodes
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		// Process each of the trie nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries in the synced trie are complete
		for _, root := range added {
			if err := checkTrieConsistency(triedb, root); err != nil {
				t.Fatalf("trie inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected
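	// Skip added[0], the root: deleting it would make checkTrieConsistency
	// treat the trie as non-existent and report it as consistent.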
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := diskdb.Get(key)

		diskdb.Delete(key)
		if err := checkTrieConsistency(triedb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		diskdb.Put(key, value)
	}
}