github.com/mprishchepo/go-ethereum@v1.9.7-0.20191031044858-21506be82b68/trie/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"testing"

	"github.com/Fantom-foundation/go-ethereum/common"
	"github.com/Fantom-foundation/go-ethereum/ethdb/memorydb"
)

// makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie() (*Database, *Trie, map[string][]byte) {
	// Create an empty trie
	triedb := NewDatabase(memorydb.New())
	trie, _ := New(common.Hash{}, triedb)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.Update(key, val)
		}
	}
	trie.Commit(nil)

	// Return the generated trie
	return triedb, trie, content
}

// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	trie, err := New(common.BytesToHash(root), db)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}

// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error {
	// Create and iterate a trie rooted in a subnode
	trie, err := New(root, db)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}
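
// Every sync test below drives the same request/deliver/commit loop against
// the scheduler: ask Sync.Missing for outstanding node hashes, resolve them
// from the source Database, feed them back through Process, and flush the
// completed nodes with Commit. The helper below is a minimal sketch of that
// shared pattern, for reference only; syncWith is a hypothetical name, and
// the tests themselves spell the loop out inline.
func syncWith(t *testing.T, sched *Sync, srcDb *Database, diskdb *memorydb.Database, count int) {
	for queue := sched.Missing(count); len(queue) > 0; queue = sched.Missing(count) {
		// Resolve every requested hash from the source trie database
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		// Feed the results back and persist whatever became complete
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
	}
}
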
// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
	dbA := NewDatabase(memorydb.New())
	dbB := NewDatabase(memorydb.New())
	emptyA, _ := New(common.Hash{}, dbA)
	emptyB, _ := New(emptyRoot, dbB)

	for i, trie := range []*Trie{emptyA, emptyB} {
		if req := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New())).Missing(1); len(req) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v", i, req)
		}
	}
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeSyncIndividual(t *testing.T) { testIterativeSync(t, 1) }
func TestIterativeSyncBatched(t *testing.T)    { testIterativeSync(t, 100) }

func testIterativeSync(t *testing.T, count int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	queue := append([]common.Hash{}, sched.Missing(count)...)
	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[:0], sched.Missing(count)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others are sent only later.
func TestIterativeDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	queue := append([]common.Hash{}, sched.Missing(10000)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[len(results):], sched.Missing(10000)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}
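
// A note on the Missing batch sizes used above and below: judging purely by
// how these tests use it, Missing(1) and Missing(100) cap the number of
// retrieval tasks handed out per call, while a large cap like Missing(10000)
// (and Missing(0) in the duplicate avoidance test) effectively drains the
// whole request queue in one go. This reading is inferred from the tests,
// not stated in this file.
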
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) }
func TestIterativeRandomSyncBatched(t *testing.T)    { testIterativeRandomSync(t, 100) }

func testIterativeRandomSync(t *testing.T, count int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(count) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(count) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even those in random order), and the others are
// sent only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(10000) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Hash)
		}
		for _, hash := range sched.Missing(10000) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}
// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced from multiple locations in the trie.
func TestDuplicateAvoidanceSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	queue := append([]common.Hash{}, sched.Missing(0)...)
	requested := make(map[common.Hash]struct{})

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			if _, ok := requested[hash]; ok {
				t.Errorf("hash %x already requested once", hash)
			}
			requested[hash] = struct{}{}

			results[i] = SyncResult{hash, data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[:0], sched.Missing(0)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, _ := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	var added []common.Hash
	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of trie nodes
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		// Process each of the trie nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries in the synced trie are complete
		for _, root := range added {
			if err := checkTrieConsistency(triedb, root); err != nil {
				t.Fatalf("trie inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		queue = append(queue[:0], sched.Missing(1)...)
	}
	// Sanity check that removing any node from the database is detected
	for _, node := range added[1:] {
		key := node.Bytes()
		value, _ := diskdb.Get(key)

		diskdb.Delete(key)
		if err := checkTrieConsistency(triedb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", key)
		}
		diskdb.Put(key, value)
	}
}
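
// As a usage sketch only: with the hypothetical syncWith helper from the top
// of this file, the iterative tests above collapse to a few lines, assuming
// the same makeTestTrie/checkTrieContents harness. This is an illustration,
// not one of the original tests.
func testIterativeSyncSketch(t *testing.T, count int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie, sync it, and cross check the contents
	diskdb := memorydb.New()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))

	syncWith(t, sched, srcDb, diskdb, count)
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}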