github.com/neatlab/neatio@v1.7.3-0.20220425043230-d903e92fcc75/chain/trie/sync_test.go

package trie

import (
	"bytes"
	"testing"

	"github.com/neatlab/neatio/neatdb/memorydb"
	"github.com/neatlab/neatio/utilities/common"
)

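// makeTestTrie creates a sample test trie to exercise node-wise reconstruction.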
func makeTestTrie() (*Database, *Trie, map[string][]byte) {
	// Create an empty trie backed by a fresh in-memory database
	triedb := NewDatabase(memorydb.New())
	trie, _ := New(common.Hash{}, triedb)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.Update(key, val)
		}
	}
	trie.Commit(nil)

	// Return the generated trie together with its expected content
	return triedb, trie, content
}

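// checkTrieContents cross references a reconstructed trie with an expected
// data content map.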
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	trie, err := New(common.BytesToHash(root), db)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}

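// checkTrieConsistency checks that all nodes in a trie are indeed present.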
func checkTrieConsistency(db *Database, root common.Hash) error {
	// Create and iterate a trie rooted in a sub-node
	trie, err := New(root, db)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

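// Tests that an empty trie is not scheduled for syncing.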
func TestEmptySync(t *testing.T) {
	dbA := NewDatabase(memorydb.New())
	dbB := NewDatabase(memorydb.New())
	emptyA, _ := New(common.Hash{}, dbA)
	emptyB, _ := New(emptyRoot, dbB)

	for i, trie := range []*Trie{emptyA, emptyB} {
		if req := NewSync(trie.Hash(), memorydb.New(), nil).Missing(1); len(req) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v", i, req)
		}
	}
}

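// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.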
func TestIterativeSyncIndividual(t *testing.T) { testIterativeSync(t, 1) }
func TestIterativeSyncBatched(t *testing.T)    { testIterativeSync(t, 100) }

func testIterativeSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	queue := append([]common.Hash{}, sched.Missing(batch)...)
	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

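// Tests that the trie scheduler can correctly reconstruct the state even if
// only partial results are returned, and the others sent only later.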
func TestIterativeDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	queue := append([]common.Hash{}, sched.Missing(10000)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes in each round
		results := make([]SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[len(results):], sched.Missing(10000)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

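// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.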
func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) }
func TestIterativeRandomSyncBatched(t *testing.T)    { testIterativeRandomSync(t, 100) }

func testIterativeRandomSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

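// Tests that the trie scheduler can correctly reconstruct the state even if
// only partial results are returned (even those in random order), with the
// others sent only later.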
func TestIterativeRandomDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(10000) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only about half of the scheduled nodes, in random order
		results := make([]SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, result := range results {
			delete(queue, result.Hash)
		}
		for _, hash := range sched.Missing(10000) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

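// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced from multiple places.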
func TestDuplicateAvoidanceSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	queue := append([]common.Hash{}, sched.Missing(0)...)
	requested := make(map[common.Hash]struct{})

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			// Flag any hash that is scheduled more than once
			if _, ok := requested[hash]; ok {
				t.Errorf("hash %x already requested once", hash)
			}
			requested[hash] = struct{}{}

			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(0)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

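// Tests that at any point in time during a sync, only complete sub-tries are
// in the database.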
func TestIncompleteSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, _ := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil)

	var added []common.Hash
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of trie nodes
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		// Process each of the trie nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		if index, err := sched.Commit(diskdb); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete
		for _, root := range added {
			if err := checkTrieConsistency(triedb, root); err != nil {
				t.Fatalf("trie inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := diskdb.Get(key)

		diskdb.Delete(key)
		if err := checkTrieConsistency(triedb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		diskdb.Put(key, value)
	}
}