github.com/theQRL/go-zond@v0.1.1/trie/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/crypto"
	"github.com/theQRL/go-zond/trie/trienode"
	"github.com/theQRL/go-zond/zonddb"
	"github.com/theQRL/go-zond/zonddb/memorydb"
)

// makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie(scheme string) (zonddb.Database, *Database, *StateTrie, map[string][]byte) {
	// Create an empty trie
	db := rawdb.NewMemoryDatabase()
	triedb := newTestDatabase(db, scheme)
	trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.MustUpdate(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.MustUpdate(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.MustUpdate(key, val)
		}
	}
	root, nodes, _ := trie.Commit(false)
	if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
		panic(fmt.Errorf("failed to commit db: %v", err))
	}
	if err := triedb.Commit(root, false); err != nil {
		panic(err)
	}
	// Re-create the trie based on the new state
	trie, _ = NewStateTrie(TrieID(root), triedb)
	return db, triedb, trie, content
}

// checkTrieContents cross-references a reconstructed trie with an expected
// data content map.
func checkTrieContents(t *testing.T, db zonddb.Database, scheme string, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	ndb := newTestDatabase(db, scheme)
	trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}
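
// Taken together, the helpers above give every test below the same skeleton:
// build a populated source trie, sync it into a fresh destination database,
// then verify the copy. A minimal sketch of that flow (hypothetical test
// body; the real tests vary batch sizes, ordering and delivery):
//
//	_, srcDb, srcTrie, srcData := makeTestTrie(rawdb.HashScheme)
//	diskdb := rawdb.NewMemoryDatabase()
//	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())
//	// ...drive sched.Missing / sched.ProcessNode / sched.Commit until done...
//	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)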

// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db zonddb.Database, scheme string, root common.Hash) error {
	ndb := newTestDatabase(db, scheme)
	trie, err := NewStateTrie(TrieID(root), ndb)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
	it := trie.MustNodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// trieElement represents an element in the state trie (bytecode or trie node).
type trieElement struct {
	path     string
	hash     common.Hash
	syncPath SyncPath
}

// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
	dbA := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
	dbB := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
	dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
	dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)

	emptyA := NewEmpty(dbA)
	emptyB, _ := New(TrieID(types.EmptyRootHash), dbB)
	emptyC := NewEmpty(dbC)
	emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)

	for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} {
		sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB, dbC, dbD}[i].Scheme())
		if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes)
		}
	}
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeSync(t *testing.T) {
	testIterativeSync(t, 1, false, rawdb.HashScheme)
	testIterativeSync(t, 100, false, rawdb.HashScheme)
	testIterativeSync(t, 1, true, rawdb.HashScheme)
	testIterativeSync(t, 100, true, rawdb.HashScheme)
	testIterativeSync(t, 1, false, rawdb.PathScheme)
	testIterativeSync(t, 100, false, rawdb.PathScheme)
	testIterativeSync(t, 1, true, rawdb.PathScheme)
	testIterativeSync(t, 100, true, rawdb.PathScheme)
}
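
// Every test below repeats the same conversion from Missing results into
// trieElement tasks. The helper here is a hedged sketch of that pattern,
// factored out purely for illustration (the original suite keeps it inline
// and does not define this function):
func missingToElements(sched *Sync, count int) []trieElement {
	// Code hashes are ignored: the test tries contain no bytecode.
	paths, nodes, _ := sched.Missing(count)
	elements := make([]trieElement, 0, len(paths))
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	return elements
}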

func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	paths, nodes, _ := sched.Missing(count)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	reader, err := srcDb.Reader(srcTrie.Hash())
	if err != nil {
		t.Fatalf("State is not available %x", srcTrie.Hash())
	}
	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		if !bypath {
			for i, element := range elements {
				owner, inner := ResolvePath([]byte(element.path))
				data, err := reader.Node(owner, inner, element.hash)
				if err != nil {
					t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
				}
				results[i] = NodeSyncResult{element.path, data}
			}
		} else {
			for i, element := range elements {
				data, _, err := srcTrie.GetNode(element.syncPath[len(element.syncPath)-1])
				if err != nil {
					t.Fatalf("failed to retrieve node data for path %x: %v", element.path, err)
				}
				results[i] = NodeSyncResult{element.path, data}
			}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(count)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
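
// Note on the bypath mode above: a node can be fetched either by resolving
// its path into an (owner, inner) pair and asking the database reader for
// the blob behind a hash, or by walking the source trie directly with the
// last element of its SyncPath. Both must yield the same node data, which is
// why testIterativeSync can feed either into ProcessNode:
//
//	owner, inner := ResolvePath([]byte(element.path))
//	data, err := reader.Node(owner, inner, element.hash)                      // by hash
//	data, _, err = srcTrie.GetNode(element.syncPath[len(element.syncPath)-1]) // by path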

// Tests that the trie scheduler can correctly reconstruct the state even if
// only partial results are returned, and the others are sent only later.
func TestIterativeDelayedSync(t *testing.T) {
	testIterativeDelayedSync(t, rawdb.HashScheme)
	testIterativeDelayedSync(t, rawdb.PathScheme)
}

func testIterativeDelayedSync(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	paths, nodes, _ := sched.Missing(10000)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	reader, err := srcDb.Reader(srcTrie.Hash())
	if err != nil {
		t.Fatalf("State is not available %x", srcTrie.Hash())
	}
	for len(elements) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]NodeSyncResult, len(elements)/2+1)
		for i, element := range elements[:len(results)] {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := reader.Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(10000)
		elements = elements[len(results):]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, albeit in a
// random order.
func TestIterativeRandomSyncIndividual(t *testing.T) {
	testIterativeRandomSync(t, 1, rawdb.HashScheme)
	testIterativeRandomSync(t, 100, rawdb.HashScheme)
	testIterativeRandomSync(t, 1, rawdb.PathScheme)
	testIterativeRandomSync(t, 100, rawdb.PathScheme)
}
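
// The delayed variant above intentionally answers only len(elements)/2+1
// requests per round. The +1 guarantees progress even when a single task
// remains (1/2+1 = 1), so the loop always terminates; with five pending
// tasks and no new discoveries, the batches would be 3 then 2.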

func testIterativeRandomSync(t *testing.T, count int, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	paths, nodes, _ := sched.Missing(count)
	queue := make(map[string]trieElement)
	for i, path := range paths {
		queue[path] = trieElement{
			path:     path,
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(path)),
		}
	}
	reader, err := srcDb.Reader(srcTrie.Hash())
	if err != nil {
		t.Fatalf("State is not available %x", srcTrie.Hash())
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]NodeSyncResult, 0, len(queue))
		for path, element := range queue {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := reader.Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results = append(results, NodeSyncResult{path, data})
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(count)
		queue = make(map[string]trieElement)
		for i, path := range paths {
			queue[path] = trieElement{
				path:     path,
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(path)),
			}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
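
// The randomized tests need no explicit shuffle: the queue is a map keyed by
// path, and Go's map iteration order is unspecified, so ranging over it
// serves the requests in an effectively random order. The pattern, reduced:
//
//	queue := make(map[string]trieElement)
//	for i, path := range paths {
//		queue[path] = trieElement{path: path, hash: nodes[i], syncPath: NewSyncPath([]byte(path))}
//	}
//	for path, element := range queue { // order varies between runs
//		// fetch and deliver element...
//	}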

// Tests that the trie scheduler can correctly reconstruct the state even if
// only partial results are returned (in random order at that), and the others
// are sent only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
	testIterativeRandomDelayedSync(t, rawdb.HashScheme)
	testIterativeRandomDelayedSync(t, rawdb.PathScheme)
}

func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	paths, nodes, _ := sched.Missing(10000)
	queue := make(map[string]trieElement)
	for i, path := range paths {
		queue[path] = trieElement{
			path:     path,
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(path)),
		}
	}
	reader, err := srcDb.Reader(srcTrie.Hash())
	if err != nil {
		t.Fatalf("State is not available %x", srcTrie.Hash())
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]NodeSyncResult, 0, len(queue)/2+1)
		for path, element := range queue {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := reader.Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results = append(results, NodeSyncResult{path, data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Path)
		}
		paths, nodes, _ = sched.Missing(10000)
		for i, path := range paths {
			queue[path] = trieElement{
				path:     path,
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(path)),
			}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
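
// Two details keep the bookkeeping above sound: the fetch loop breaks once
// len(results) reaches cap(results), so at most half the queue (plus one) is
// served per round, and completed paths are removed only after sched.Commit
// has persisted the batch:
//
//	for _, result := range results {
//		delete(queue, result.Path)
//	}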

// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced multiple times.
func TestDuplicateAvoidanceSync(t *testing.T) {
	testDuplicateAvoidanceSync(t, rawdb.HashScheme)
	testDuplicateAvoidanceSync(t, rawdb.PathScheme)
}

func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	paths, nodes, _ := sched.Missing(0)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	reader, err := srcDb.Reader(srcTrie.Hash())
	if err != nil {
		t.Fatalf("State is not available %x", srcTrie.Hash())
	}
	requested := make(map[common.Hash]struct{})
	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := reader.Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			if _, ok := requested[element.hash]; ok {
				t.Errorf("hash %x already requested once", element.hash)
			}
			requested[element.hash] = struct{}{}

			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(0)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
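
// Duplicate detection above is a plain visited-set: every hash served to the
// scheduler is recorded, and a second request for the same hash fails the
// test. The check, reduced to its core:
//
//	if _, ok := requested[element.hash]; ok {
//		t.Errorf("hash %x already requested once", element.hash)
//	}
//	requested[element.hash] = struct{}{}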

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteSyncHash(t *testing.T) {
	testIncompleteSync(t, rawdb.HashScheme)
	testIncompleteSync(t, rawdb.PathScheme)
}

func testIncompleteSync(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, _ := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	var (
		addedKeys   []string
		addedHashes []common.Hash
		elements    []trieElement
		root        = srcTrie.Hash()
	)
	paths, nodes, _ := sched.Missing(1)
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	reader, err := srcDb.Reader(srcTrie.Hash())
	if err != nil {
		t.Fatalf("State is not available %x", srcTrie.Hash())
	}
	for len(elements) > 0 {
		// Fetch a batch of trie nodes
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := reader.Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		// Process each of the trie nodes
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		for _, result := range results {
			hash := crypto.Keccak256Hash(result.Data)
			if hash != root {
				addedKeys = append(addedKeys, result.Path)
				addedHashes = append(addedHashes, hash)
			}
		}
		// Fetch the next batch to retrieve
		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Sanity check that removing any node from the database is detected
	for i, path := range addedKeys {
		owner, inner := ResolvePath([]byte(path))
		nodeHash := addedHashes[i]
		value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
		rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
		if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", path)
		}
		rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
	}
}
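
// The incomplete-sync test above probes durability node by node: every synced
// node except the root is deleted from the database, the trie is re-checked
// (which must now fail), and the node is written back. The probe, reduced:
//
//	value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
//	rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
//	if err := checkTrieConsistency(diskdb, scheme, root); err == nil {
//		t.Fatalf("trie inconsistency not caught, missing: %x", path)
//	}
//	rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)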

// Tests that trie nodes get scheduled lexicographically when they have the
// same depth.
func TestSyncOrdering(t *testing.T) {
	testSyncOrdering(t, rawdb.HashScheme)
	testSyncOrdering(t, rawdb.PathScheme)
}

func testSyncOrdering(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler, tracking the requests
	diskdb := rawdb.NewMemoryDatabase()
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	var (
		reqs     []SyncPath
		elements []trieElement
	)
	paths, nodes, _ := sched.Missing(1)
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
		reqs = append(reqs, NewSyncPath([]byte(paths[i])))
	}
	reader, err := srcDb.Reader(srcTrie.Hash())
	if err != nil {
		t.Fatalf("State is not available %x", srcTrie.Hash())
	}
	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := reader.Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
			reqs = append(reqs, NewSyncPath([]byte(paths[i])))
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)

	// Check that the trie nodes have been requested path-ordered
	for i := 0; i < len(reqs)-1; i++ {
		if len(reqs[i]) > 1 || len(reqs[i+1]) > 1 {
			// In the case of the trie tests, there's no storage so the tuples
			// must always be single items. 2-tuples should be tested in state.
			t.Errorf("Invalid request tuples: len(%v) or len(%v) > 1", reqs[i], reqs[i+1])
		}
		if bytes.Compare(compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0])) > 0 {
			t.Errorf("Invalid request order: %v before %v", compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0]))
		}
	}
}
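
// The ordering assertion above converts each compact-encoded request path to
// hex nibbles via compactToHex before comparing, so the lexicographic check
// runs on nibble order rather than on the compact encoding (whose first byte
// carries odd-length/terminator flags); presumably comparing the raw compact
// bytes could mis-order paths of differing parity:
//
//	if bytes.Compare(compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0])) > 0 {
//		// requests were scheduled out of path order
//	}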

func syncWith(t *testing.T, root common.Hash, db zonddb.Database, srcDb *Database) {
	// Create a destination trie and sync with the scheduler
	sched := NewSync(root, db, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// at the testing trie.
	paths, nodes, _ := sched.Missing(1)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	reader, err := srcDb.Reader(root)
	if err != nil {
		t.Fatalf("State is not available %x", root)
	}
	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			owner, inner := ResolvePath([]byte(element.path))
			data, err := reader.Node(owner, inner, element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for index, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result[%d][%v] data %v %v", index, []byte(result.Path), result.Data, err)
			}
		}
		batch := db.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
}
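
// syncWith is the generic driver used by the moving-target test below: unlike
// the earlier tests it takes the target root and both databases as
// parameters, so it can simply be re-run each time the source trie advances.
// Typical use (newRoot is a hypothetical later root of the same source):
//
//	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
//	// ...mutate the source trie and commit, obtaining newRoot...
//	syncWith(t, newRoot, diskdb, srcDb)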

// Tests that the syncing target keeps moving, which may overwrite the stale
// states synced in the last cycle.
func TestSyncMovingTarget(t *testing.T) {
	testSyncMovingTarget(t, rawdb.HashScheme)
	testSyncMovingTarget(t, rawdb.PathScheme)
}

func testSyncMovingTarget(t *testing.T, scheme string) {
	// Create a random trie to copy
	_, srcDb, srcTrie, srcData := makeTestTrie(scheme)

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)

	// Push more modifications into the src trie, to see if the dest trie can
	// still sync with it (overwriting stale states)
	var (
		preRoot = srcTrie.Hash()
		diff    = make(map[string][]byte)
	)
	for i := byte(0); i < 10; i++ {
		key, val := randBytes(32), randBytes(32)
		srcTrie.MustUpdate(key, val)
		diff[string(key)] = val
	}
	root, nodes, _ := srcTrie.Commit(false)
	if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
		panic(err)
	}
	if err := srcDb.Commit(root, false); err != nil {
		panic(err)
	}
	preRoot = root
	srcTrie, _ = NewStateTrie(TrieID(root), srcDb)

	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff)

	// Revert added modifications from the src trie, to see if the dest trie
	// can still sync with it (overwriting reverted states)
	var reverted = make(map[string][]byte)
	for k := range diff {
		srcTrie.MustDelete([]byte(k))
		reverted[k] = nil
	}
	for k := range srcData {
		val := randBytes(32)
		srcTrie.MustUpdate([]byte(k), val)
		reverted[k] = val
	}
	root, nodes, _ = srcTrie.Commit(false)
	if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
		panic(err)
	}
	if err := srcDb.Commit(root, false); err != nil {
		panic(err)
	}
	srcTrie, _ = NewStateTrie(TrieID(root), srcDb)

	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted)
}