github.com/calmw/ethereum@v0.1.1/trie/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/calmw/ethereum/common"
	"github.com/calmw/ethereum/core/rawdb"
	"github.com/calmw/ethereum/core/types"
	"github.com/calmw/ethereum/crypto"
	"github.com/calmw/ethereum/ethdb"
	"github.com/calmw/ethereum/ethdb/memorydb"
	"github.com/calmw/ethereum/trie/trienode"
)

// makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[string][]byte) {
	// Create an empty trie
	db := rawdb.NewMemoryDatabase()
	triedb := newTestDatabase(db, scheme)
	trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.MustUpdate(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.MustUpdate(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.MustUpdate(key, val)
		}
	}
	root, nodes := trie.Commit(false)
	if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
		panic(fmt.Errorf("failed to commit db: %v", err))
	}
	if err := triedb.Commit(root, false); err != nil {
		panic(err)
	}
	// Re-create the trie based on the new state
	trie, _ = NewStateTrie(TrieID(root), triedb)
	return db, triedb, trie, content
}

// checkTrieContents cross-references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	ndb := newTestDatabase(db, scheme)
	trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}

// checkTrieConsistency checks that all nodes in a trie are indeed present.
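// It walks the entire trie with a node iterator, so any node that fails to
// resolve surfaces as an iterator error; a non-existent root is treated as
// consistent.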
func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error {
	ndb := newTestDatabase(db, scheme)
	trie, err := NewStateTrie(TrieID(root), ndb)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// trieElement represents an element in the state trie (bytecode or trie node).
type trieElement struct {
	path     string
	hash     common.Hash
	syncPath SyncPath
}

// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
	dbA := NewDatabase(rawdb.NewMemoryDatabase())
	dbB := NewDatabase(rawdb.NewMemoryDatabase())
	//dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
	//dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)

	emptyA := NewEmpty(dbA)
	emptyB, _ := New(TrieID(types.EmptyRootHash), dbB)
	//emptyC := NewEmpty(dbC)
	//emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)

	for i, trie := range []*Trie{emptyA, emptyB /*emptyC, emptyD*/} {
		sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB /*dbC, dbD*/}[i].Scheme())
		if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes)
		}
	}
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeSync(t *testing.T) {
	testIterativeSync(t, 1, false, rawdb.HashScheme)
	testIterativeSync(t, 100, false, rawdb.HashScheme)
	testIterativeSync(t, 1, true, rawdb.HashScheme)
	testIterativeSync(t, 100, true, rawdb.HashScheme)
	// testIterativeSync(t, 1, false, rawdb.PathScheme)
	// testIterativeSync(t, 100, false, rawdb.PathScheme)
	// testIterativeSync(t, 1, true, rawdb.PathScheme)
	// testIterativeSync(t, 100, true, rawdb.PathScheme)
}

func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
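	// Drive the sync loop: ask the scheduler which nodes are missing, fetch
	// them from the source database (by hash or by path), feed the results
	// back, and flush each committed batch to disk until nothing remains.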
	paths, nodes, _ := sched.Missing(count)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		if !bypath {
			for i, element := range elements {
				owner, inner := ResolvePath([]byte(element.path))
				data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
				if err != nil {
					t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
				}
				results[i] = NodeSyncResult{element.path, data}
			}
		} else {
			for i, element := range elements {
				data, _, err := srcTrie.GetNode(element.syncPath[len(element.syncPath)-1])
				if err != nil {
					t.Fatalf("failed to retrieve node data for path %x: %v", element.path, err)
				}
				results[i] = NodeSyncResult{element.path, data}
			}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(count)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, with the rest sent only later.
func TestIterativeDelayedSync(t *testing.T) {
	testIterativeDelayedSync(t, rawdb.HashScheme)
	//testIterativeDelayedSync(t, rawdb.PathScheme)
}

func testIterativeDelayedSync(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
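	// Schedule a deliberately large batch so that every currently known
	// missing node is requested up front; each round below only delivers
	// half of them, leaving the rest delayed.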
	paths, nodes, _ := sched.Missing(10000)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	for len(elements) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]NodeSyncResult, len(elements)/2+1)
		for i, element := range elements[:len(results)] {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(10000)
		elements = elements[len(results):]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, albeit in a
// random order.
func TestIterativeRandomSyncIndividual(t *testing.T) {
	testIterativeRandomSync(t, 1, rawdb.HashScheme)
	testIterativeRandomSync(t, 100, rawdb.HashScheme)
	// testIterativeRandomSync(t, 1, rawdb.PathScheme)
	// testIterativeRandomSync(t, 100, rawdb.PathScheme)
}

func testIterativeRandomSync(t *testing.T, count int, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
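	// Keep the pending nodes in a map so that draining it follows Go's
	// randomized map iteration order rather than the scheduling order.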
	paths, nodes, _ := sched.Missing(count)
	queue := make(map[string]trieElement)
	for i, path := range paths {
		queue[path] = trieElement{
			path:     path,
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(path)),
		}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]NodeSyncResult, 0, len(queue))
		for path, element := range queue {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results = append(results, NodeSyncResult{path, data})
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(count)
		queue = make(map[string]trieElement)
		for i, path := range paths {
			queue[path] = trieElement{
				path:     path,
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(path)),
			}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even those in random order), with the rest sent
// only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
	testIterativeRandomDelayedSync(t, rawdb.HashScheme)
	// testIterativeRandomDelayedSync(t, rawdb.PathScheme)
}

func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
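	// Combine both twists: results are gathered in random map order and only
	// about half of the queued nodes are delivered per round.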
	paths, nodes, _ := sched.Missing(10000)
	queue := make(map[string]trieElement)
	for i, path := range paths {
		queue[path] = trieElement{
			path:     path,
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(path)),
		}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]NodeSyncResult, 0, len(queue)/2+1)
		for path, element := range queue {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results = append(results, NodeSyncResult{path, data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Path)
		}
		paths, nodes, _ = sched.Missing(10000)
		for i, path := range paths {
			queue[path] = trieElement{
				path:     path,
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(path)),
			}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}

// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced multiple times.
func TestDuplicateAvoidanceSync(t *testing.T) {
	testDuplicateAvoidanceSync(t, rawdb.HashScheme)
	// testDuplicateAvoidanceSync(t, rawdb.PathScheme)
}

func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
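	// A limit of zero requests every currently known missing node at once,
	// so a node scheduled twice would show up as a duplicate retrieval below.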
	paths, nodes, _ := sched.Missing(0)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	requested := make(map[common.Hash]struct{})

	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			if _, ok := requested[element.hash]; ok {
				t.Errorf("hash %x already requested once", element.hash)
			}
			requested[element.hash] = struct{}{}

			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(0)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteSyncHash(t *testing.T) {
	testIncompleteSync(t, rawdb.HashScheme)
	// testIncompleteSync(t, rawdb.PathScheme)
}

func testIncompleteSync(t *testing.T, scheme string) {
	t.Parallel()

	// Create a random trie to copy
	_, srcDb, srcTrie, _ := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
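	// Track every node committed during the sync (except the root), so each
	// one can later be deleted to verify that the corruption is detected.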
	var (
		addedKeys   []string
		addedHashes []common.Hash
		elements    []trieElement
		root        = srcTrie.Hash()
	)
	paths, nodes, _ := sched.Missing(1)
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	for len(elements) > 0 {
		// Fetch a batch of trie nodes
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		// Process each of the trie nodes
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		for _, result := range results {
			hash := crypto.Keccak256Hash(result.Data)
			if hash != root {
				addedKeys = append(addedKeys, result.Path)
				addedHashes = append(addedHashes, hash)
			}
		}
		// Fetch the next batch to retrieve
		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Sanity check that removing any node from the database is detected
	for i, path := range addedKeys {
		owner, inner := ResolvePath([]byte(path))
		nodeHash := addedHashes[i]
		value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
		rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
		if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", path)
		}
		rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
	}
}

// Tests that trie nodes are scheduled lexicographically when they have the same
// depth.
func TestSyncOrdering(t *testing.T) {
	testSyncOrdering(t, rawdb.HashScheme)
	// testSyncOrdering(t, rawdb.PathScheme)
}

func testSyncOrdering(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler, tracking the requests
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
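	// reqs accumulates every SyncPath handed out by the scheduler; the final
	// check asserts that they were requested in lexicographic path order.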
	var (
		reqs     []SyncPath
		elements []trieElement
	)
	paths, nodes, _ := sched.Missing(1)
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
		reqs = append(reqs, NewSyncPath([]byte(paths[i])))
	}

	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
			reqs = append(reqs, NewSyncPath([]byte(paths[i])))
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)

	// Check that the trie nodes have been requested path-ordered
	for i := 0; i < len(reqs)-1; i++ {
		if len(reqs[i]) > 1 || len(reqs[i+1]) > 1 {
			// In the case of the trie tests, there's no storage so the tuples
			// must always be single items. 2-tuples should be tested in state.
			t.Errorf("Invalid request tuples: len(%v) or len(%v) > 1", reqs[i], reqs[i+1])
		}
		if bytes.Compare(compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0])) > 0 {
			t.Errorf("Invalid request order: %v before %v", compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0]))
		}
	}
}

// syncWith synchronizes the destination database with the trie rooted at the
// given hash, draining the scheduler until no retrieval tasks remain.
func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database) {
	// Create a destination trie and sync with the scheduler
	sched := NewSync(root, db, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	paths, nodes, _ := sched.Missing(1)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := srcDb.Reader(root).Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for index, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result[%d][%v] data %v %v", index, []byte(result.Path), result.Data, err)
			}
		}
		batch := db.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
}

// Tests that the syncing target can keep moving, which may overwrite the stale
// states synced in the last cycle.
func TestSyncMovingTarget(t *testing.T) {
	testSyncMovingTarget(t, rawdb.HashScheme)
	// testSyncMovingTarget(t, rawdb.PathScheme)
}

func testSyncMovingTarget(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)

	// Push more modifications into the src trie, to see if the dest trie can
	// still sync with it (overwriting stale states)
	var (
		preRoot = srcTrie.Hash()
		diff    = make(map[string][]byte)
	)
	for i := byte(0); i < 10; i++ {
		key, val := randBytes(32), randBytes(32)
		srcTrie.MustUpdate(key, val)
		diff[string(key)] = val
	}
	root, nodes := srcTrie.Commit(false)
	if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
		panic(err)
	}
	if err := srcDb.Commit(root, false); err != nil {
		panic(err)
	}
	preRoot = root
	srcTrie, _ = NewStateTrie(TrieID(root), srcDb)

	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff)

	// Revert the added modifications from the src trie, to see if the dest
	// trie can still sync with it (overwriting reverted states)
	var reverted = make(map[string][]byte)
	for k := range diff {
		srcTrie.MustDelete([]byte(k))
		reverted[k] = nil
	}
	for k := range srcData {
		val := randBytes(32)
		srcTrie.MustUpdate([]byte(k), val)
		reverted[k] = val
	}
	root, nodes = srcTrie.Commit(false)
	if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
		panic(err)
	}
	if err := srcDb.Commit(root, false); err != nil {
		panic(err)
	}
	srcTrie, _ = NewStateTrie(TrieID(root), srcDb)

	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted)
}