github.com/MikyChow/arbitrum-go-ethereum@v0.0.0-20230306102812-078da49636de/eth/protocols/snap/sync_test.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"math/big"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/MikyChow/arbitrum-go-ethereum/common"
	"github.com/MikyChow/arbitrum-go-ethereum/core/rawdb"
	"github.com/MikyChow/arbitrum-go-ethereum/core/types"
	"github.com/MikyChow/arbitrum-go-ethereum/crypto"
	"github.com/MikyChow/arbitrum-go-ethereum/ethdb"
	"github.com/MikyChow/arbitrum-go-ethereum/light"
	"github.com/MikyChow/arbitrum-go-ethereum/log"
	"github.com/MikyChow/arbitrum-go-ethereum/rlp"
	"github.com/MikyChow/arbitrum-go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)

func TestHashing(t *testing.T) {
	t.Parallel()

	var bytecodes = make([][]byte, 10)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var want, got string
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hash := hasher.Sum(nil)
			got = fmt.Sprintf("%v\n%v", got, hash)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
			want = fmt.Sprintf("%v\n%v", want, hash)
		}
	}
	old()
	new()
	if want != got {
		t.Errorf("want\n%v\ngot\n%v\n", want, got)
	}
}

func BenchmarkHashing(b *testing.B) {
	var bytecodes = make([][]byte, 10000)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Sum(nil)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
		}
	}
	b.Run("old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			old()
		}
	})
	b.Run("new", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			new()
		}
	})
}

type (
	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
)

type testPeer struct {
	id            string
	test          *testing.T
	remote        *Syncer
	logger        log.Logger
	accountTrie   *trie.Trie
	accountValues entrySlice
	storageTries  map[common.Hash]*trie.Trie
	storageValues map[common.Hash]entrySlice

	accountRequestHandler accountHandlerFunc
	storageRequestHandler storageHandlerFunc
	trieRequestHandler    trieHandlerFunc
	codeRequestHandler    codeHandlerFunc
	term                  func()

	// counters
	nAccountRequests  int
	nStorageRequests  int
	nBytecodeRequests int
	nTrienodeRequests int
}

func newTestPeer(id string, t *testing.T, term func()) *testPeer {
	peer := &testPeer{
		id:                    id,
		test:                  t,
		logger:                log.New("id", id),
		accountRequestHandler: defaultAccountRequestHandler,
		trieRequestHandler:    defaultTrieRequestHandler,
		storageRequestHandler: defaultStorageRequestHandler,
		codeRequestHandler:    defaultCodeRequestHandler,
		term:                  term,
	}
	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
	//peer.logger.SetHandler(stderrHandler)
	return peer
}

func (t *testPeer) ID() string      { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }

func (t *testPeer) Stats() string {
	return fmt.Sprintf(`Account requests: %d
Storage requests: %d
Bytecode requests: %d
Trienode requests: %d
`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
}

func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
	t.nAccountRequests++
	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
	t.nTrienodeRequests++
	go t.trieRequestHandler(t, id, root, paths, bytes)
	return nil
}

func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	t.nStorageRequests++
	if len(accounts) == 1 && origin != nil {
		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
	} else {
		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
	}
	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	t.nBytecodeRequests++
	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
	go t.codeRequestHandler(t, id, hashes, bytes)
	return nil
}

// defaultTrieRequestHandler is a well-behaving handler for trie healing requests
func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	// Pass the response
	var nodes [][]byte
	for _, pathset := range paths {
		switch len(pathset) {
		case 1:
			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
			if err != nil {
				t.logger.Info("Error handling req", "error", err)
				break
			}
			nodes = append(nodes, blob)
		default:
			account := t.storageTries[(common.BytesToHash(pathset[0]))]
			for _, path := range pathset[1:] {
				blob, _, err := account.TryGetNode(path)
				if err != nil {
					t.logger.Info("Error handling req", "error", err)
					break
				}
				nodes = append(nodes, blob)
			}
		}
	}
	t.remote.OnTrieNodes(t, requestId, nodes)
	return nil
}

// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
		return err
	}
	return nil
}

func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
	var size uint64
	if limit == (common.Hash{}) {
		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	}
	for _, entry := range t.accountValues {
		if size > cap {
			break
		}
		if bytes.Compare(origin[:], entry.k) <= 0 {
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
		}
		// If we've exceeded the request threshold, abort
		if bytes.Compare(entry.k, limit[:]) >= 0 {
			break
		}
	}
	// Unless we send the entire trie, we need to supply proofs
	// Actually, we need to supply proofs either way! This seems to be an implementation
	// quirk in go-ethereum
	proof := light.NewNodeSet()
	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
	}
	if len(keys) > 0 {
		lastK := (keys[len(keys)-1])[:]
		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
			t.logger.Error("Could not prove last item", "error", err)
		}
	}
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return keys, vals, proofs
}

// defaultStorageRequestHandler is a well-behaving storage request handler
func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	for _, account := range accounts {
		// The first account might start from a different origin and end sooner
		var originHash common.Hash
		if len(origin) > 0 {
			originHash = common.BytesToHash(origin)
		}
		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(limit) > 0 {
			limitHash = common.BytesToHash(limit)
		}
		var (
			keys  []common.Hash
			vals  [][]byte
			abort bool
		)
		for _, entry := range t.storageValues[account] {
			if size >= max {
				abort = true
				break
			}
			if bytes.Compare(entry.k, originHash[:]) < 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
				break
			}
		}
		if len(keys) > 0 {
			hashes = append(hashes, keys)
			slots = append(slots, vals)
		}
		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie is included
		// in the response, no need for any proofs.
		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]

			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}

// createStorageRequestResponseAlwaysProve tests a cornercase, where it always
// supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	max = max * 3 / 4

	var origin common.Hash
	if len(bOrigin) > 0 {
		origin = common.BytesToHash(bOrigin)
	}
	var exit bool
	for i, account := range accounts {
		var keys []common.Hash
		var vals [][]byte
		for _, entry := range t.storageValues[account] {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				exit = true
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if size > max {
				exit = true
			}
		}
		if i == len(accounts)-1 {
			exit = true
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)

		if exit {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]

			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
					"error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}

// emptyRequestAccountRangeFn rejects AccountRangeRequests
func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	t.remote.OnAccounts(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return nil
}

func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	t.remote.OnTrieNodes(t, requestId, nil)
	return nil
}

func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	return nil
}

func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	t.remote.OnStorage(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return nil
}

func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
//	var bytecodes [][]byte
//	t.remote.OnByteCodes(t, id, bytecodes)
//	return nil
//}

func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		// Send back the hashes
		bytecodes = append(bytecodes, h[:])
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes[:1] {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	// Missing bytecode can be retrieved again, no error expected
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

// starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to be very small
func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
}

func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
}

//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
//	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
//}

func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

// corruptStorageRequestHandler doesn't provide good proofs
func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
// also ship the entire trie inside the proof. If the attack is successful,
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	source := newTestPeer("source", t, term)
	source.accountTrie = sourceAccountTrie
	source.accountValues = elems

	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
		var (
			proofs [][]byte
			keys   []common.Hash
			vals   [][]byte
		)
		// The values
		for _, entry := range t.accountValues {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				continue
			}
			if bytes.Compare(entry.k, limit[:]) > 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
		}
		// The proofs
		proof := light.NewNodeSet()
		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
		}
		// The bloat: add proof of every single element
		for _, entry := range t.accountValues {
			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
				t.logger.Error("Could not prove item", "error", err)
			}
		}
		// And remove one item from the elements
		if len(keys) > 2 {
			keys = append(keys[:1], keys[2:]...)
			vals = append(vals[:1], vals[2:]...)
		}
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
			t.logger.Info("remote error on delivery (as expected)", "error", err)
			t.term()
			// This is actually correct, signal to exit the test successfully
		}
		return nil
	}
	syncer := setupSyncer(source)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
		t.Fatal("No error returned from incomplete/cancelled sync")
	}
}

func setupSyncer(peers ...*testPeer) *Syncer {
	stateDb := rawdb.NewMemoryDatabase()
	syncer := NewSyncer(stateDb)
	for _, peer := range peers {
		syncer.Register(peer)
		peer.remote = syncer
	}
	return syncer
}

// TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}

	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}

	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	// We're setting the timeout to very low, to increase the chance of the timeout
	// being triggered. This was previously a cause of panic, when a response
	// arrived simultaneously as a timeout was triggered.
	syncer.rates.OverrideTTLLimit = time.Millisecond

	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = nonResponsiveStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = nonResponsiveTrieRequestHandler
		}
		return source
	}

	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	// We're setting the timeout to very low, to make the test run a bit faster
	syncer.rates.OverrideTTLLimit = time.Millisecond

	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

func checkStall(t *testing.T, term func()) chan struct{} {
	testDone := make(chan struct{})
	go func() {
		select {
		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
			t.Log("Sync stalled")
			term()
		case <-testDone:
			return
		}
	}()
	return testDone
}

// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems

		if slow {
			source.accountRequestHandler = starvingAccountRequestHandler
		}
		return source
	}

	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("nice-b", false),
		mkSource("nice-c", false),
		mkSource("capped", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of codes requested are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		mkSource("capped", cappedCodeRequestHandler),
		mkSource("corrupt", corruptCodeRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.accountRequestHandler = accFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of codes requested are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		mkSource("capped", defaultAccountRequestHandler),
		mkSource("corrupt", corruptAccountRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
// one by one
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
	// so it shouldn't be more than that
	var counter int
	syncer := setupSyncer(
		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
			counter++
			return cappedCodeRequestHandler(t, id, hashes, max)
		}),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)

	// There are only 8 unique hashes, and 3K accounts. However, the code
	// deduplication is per request batch. If it were a perfect global dedup,
	// we would expect only 8 requests. If there were no dedup, there would be
	// 3k requests.
	// We expect somewhere below 100 requests for these 8 unique hashes. But
	// the number can be flaky, so don't limit it so strictly.
	if threshold := 100; counter > threshold {
		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)

	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if slow {
			source.storageRequestHandler = starvingStorageRequestHandler
		}
		return source
	}

	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("slow", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}

	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", corruptStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", noProofStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
// a peer who insists on delivering full storage sets _and_ proofs. This triggered
// an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = proofHappyStorageRequestHandler
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

type kv struct {
	k, v []byte
}

// Some helpers for sorting
type entrySlice []*kv

func (p entrySlice) Len() int           { return len(p) }
func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func key32(i uint64) []byte {
	key := make([]byte, 32)
	binary.LittleEndian.PutUint64(key, i)
	return key
}

var (
	codehashes = []common.Hash{
		crypto.Keccak256Hash([]byte{0}),
		crypto.Keccak256Hash([]byte{1}),
		crypto.Keccak256Hash([]byte{2}),
		crypto.Keccak256Hash([]byte{3}),
		crypto.Keccak256Hash([]byte{4}),
		crypto.Keccak256Hash([]byte{5}),
		crypto.Keccak256Hash([]byte{6}),
		crypto.Keccak256Hash([]byte{7}),
	}
)

// getCodeHash returns a pseudo-random code hash
func getCodeHash(i uint64) []byte {
	h := codehashes[int(i)%len(codehashes)]
	return common.CopyBytes(h[:])
}

// getCodeByHash convenience function to lookup the code from the code hash
func getCodeByHash(hash common.Hash) []byte {
	if hash == emptyCode {
		return nil
	}
	for i, h := range codehashes {
		if h == hash {
			return []byte{byte(i)}
		}
	}
	return nil
}

// makeAccountTrieNoStorage spits out a trie, along with the leafs
func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
	var (
		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie = trie.NewEmpty(db)
		entries entrySlice
	)
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(i),
		})
		key := key32(i)
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)

	// Commit the state changes into db and re-create the trie
	// for accessing later.
	root, nodes, _ := accTrie.Commit(false)
	db.Update(trie.NewWithNodeSet(nodes))

	accTrie, _ = trie.New(common.Hash{}, root, db)
	return accTrie, entries
}

// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hash.
func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
	var (
		entries    entrySlice
		boundaries []common.Hash

		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie = trie.NewEmpty(db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary accounts
	for i := 0; i < len(boundaries); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    uint64(0),
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(uint64(i)),
		})
		elem := &kv{boundaries[i].Bytes(), value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other accounts if required
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(i),
		})
		elem := &kv{key32(i), value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)

	// Commit the state changes into db and re-create the trie
	// for accessing later.
	root, nodes, _ := accTrie.Commit(false)
	db.Update(trie.NewWithNodeSet(nodes))

	accTrie, _ = trie.New(common.Hash{}, root, db)
	return accTrie, entries
}

// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
// has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie        = trie.NewEmpty(db)
		entries        entrySlice
		storageRoots   = make(map[common.Hash]common.Hash)
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
		nodes          = trie.NewMergedNodeSet()
	)
	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		// Create a storage trie
		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
		nodes.Merge(stNodes)

		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)

		storageRoots[common.BytesToHash(key)] = stRoot
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)

	// Commit account trie
	root, set, _ := accTrie.Commit(true)
	nodes.Merge(set)

	// Commit gathered dirty nodes into database
	db.Update(nodes)

	// Re-create tries with new root
	accTrie, _ = trie.New(common.Hash{}, root, db)
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
		storageTries[common.BytesToHash(key)] = trie
	}
	return accTrie, entries, storageTries, storageEntries
}

// makeAccountTrieWithStorage spits out a trie, along with the leafs
func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie        = trie.NewEmpty(db)
		entries        entrySlice
		storageRoots   = make(map[common.Hash]common.Hash)
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
		nodes          = trie.NewMergedNodeSet()
	)
	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		// Make a storage trie
		var (
			stRoot    common.Hash
			stNodes   *trie.NodeSet
			stEntries entrySlice
		)
		if boundary {
			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
		} else {
			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
		}
		nodes.Merge(stNodes)

		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)

		// we reuse the same one for all accounts
		storageRoots[common.BytesToHash(key)] = stRoot
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)

	// Commit account trie
	root, set, _ := accTrie.Commit(true)
	nodes.Merge(set)

	// Commit gathered dirty nodes into database
	db.Update(nodes)

	// Re-create tries with new root
	accTrie, err := trie.New(common.Hash{}, root, db)
	if err != nil {
		panic(err)
	}
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
		if err != nil {
			panic(err)
		}
		storageTries[common.BytesToHash(key)] = trie
	}
	return accTrie, entries, storageTries, storageEntries
}

// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// root, the un-committed node set and the sorted entries. The seed can be used
// to ensure that tries are unique.
func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
	trie, _ := trie.New(owner, common.Hash{}, db)
	var entries entrySlice
	for i := uint64(1); i <= n; i++ {
		// store 'x' at slot 'x'
		slotValue := key32(i + seed)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))

		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	root, nodes, _ := trie.Commit(false)
	return root, nodes, entries
}

// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hash.
func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
	var (
		entries    entrySlice
		boundaries []common.Hash
		trie, _    = trie.New(owner, common.Hash{}, db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary slots
	for i := 0; i < len(boundaries); i++ {
		key := boundaries[i]
		val := []byte{0xde, 0xad, 0xbe, 0xef}

		elem := &kv{key[:], val}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other slots if required
	for i := uint64(1); i <= uint64(n); i++ {
		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		slotValue := key32(i)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	root, nodes, _ := trie.Commit(false)
	return root, nodes, entries
}

func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
	t.Helper()
	triedb := trie.NewDatabase(db)
	accTrie, err := trie.New(common.Hash{}, root, triedb)
	if err != nil {
		t.Fatal(err)
	}
	accounts, slots := 0, 0
	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
	for accIt.Next() {
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		accounts++
		if acc.Root != emptyRoot {
			storeTrie, err := trie.NewStateTrie(common.BytesToHash(accIt.Key), acc.Root, triedb)
			if err != nil {
				t.Fatal(err)
			}
			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
			for storeIt.Next() {
				slots++
			}
			if err := storeIt.Err; err != nil {
				t.Fatal(err)
			}
		}
	}
	if err := accIt.Err; err != nil {
		t.Fatal(err)
	}
	t.Logf("accounts: %d, slots: %d", accounts, slots)
}

// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
	// Set the account concurrency to 1. This _should_ result in the
	// range root becoming correct, and there should be no healing needed
	defer func(old int) { accountConcurrency = old }(accountConcurrency)
	accountConcurrency = 1

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	src := mkSource("source")
	syncer := setupSyncer(src)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
	// The trie root will always be requested, since it is added when the snap
	// sync cycle starts. When popping the queue, we do not look it up again.
	// Doing so would bring this number down to zero in this artificial testcase,
	// but only add extra IO for no reason in practice.
	if have, want := src.nTrienodeRequests, 1; have != want {
		fmt.Print(src.Stats())
		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
	}
}

func TestSlotEstimation(t *testing.T) {
	for i, tc := range []struct {
		last  common.Hash
		count int
		want  uint64
	}{
		{
			// Half the space
			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			100,
			100,
		},
		{
			// 1 / 16th
			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			100,
			1500,
		},
		{
			// Bit more than 1 / 16th
			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
			100,
			1499,
		},
		{
			// Almost everything
			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
			100,
			6,
		},
		{
			// Almost nothing -- should lead to error
			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
			1,
			0,
		},
		{
			// Nothing -- should lead to error
			common.Hash{},
			100,
			0,
		},
	} {
		have, _ := estimateRemainingSlots(tc.count, tc.last)
		if want := tc.want; have != want {
			t.Errorf("test %d: have %d want %d", i, have, want)
		}
	}
}