github.com/theQRL/go-zond@v0.2.1/zond/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"math/big"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/theQRL/go-zond"
	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/consensus/beacon"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/core/vm"
	"github.com/theQRL/go-zond/event"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/params"
	"github.com/theQRL/go-zond/rlp"
	"github.com/theQRL/go-zond/trie"
	"github.com/theQRL/go-zond/zond/protocols/snap"
	zondproto "github.com/theQRL/go-zond/zond/protocols/zond"
)

// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	freezer    string
	chain      *core.BlockChain
	downloader *Downloader

	peers map[string]*downloadTesterPeer
	lock  sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester(t *testing.T) *downloadTester {
	return newTesterWithNotification(t, nil)
}

// newTesterWithNotification creates a new downloader test mocker with a
// success callback that fires once a sync cycle completes.
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
	freezer := t.TempDir()
	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
	if err != nil {
		panic(err)
	}
	t.Cleanup(func() {
		db.Close()
	})
	gspec := &core.Genesis{
		Config:  params.TestChainConfig,
		Alloc:   core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	chain, err := core.NewBlockChain(db, nil, gspec, beacon.NewFaker(), vm.Config{}, nil)
	if err != nil {
		panic(err)
	}
	tester := &downloadTester{
		freezer: freezer,
		chain:   chain,
		peers:   make(map[string]*downloadTesterPeer),
	}
	tester.downloader = New(db, new(event.TypeMux), tester.chain, tester.dropPeer, success)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
	dl.chain.Stop()

	os.RemoveAll(dl.freezer)
}
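// What follows is an illustrative sketch, not part of the original suite: it
// shows the typical lifecycle of the harness above (construct, attach a peer,
// beacon-sync, wait for the success callback, tear down). The function name is
// hypothetical; it leans on testChainBase and MaxHeaderFetch from the
// package's chain test helpers.
func exampleTesterLifecycle(t *testing.T) {
	done := make(chan struct{})
	tester := newTesterWithNotification(t, func() { close(done) })
	defer tester.terminate()

	// Serve a short chain from a single simulated peer
	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("demo", zondproto.ETH68, chain.blocks[1:])

	// Drive a full sync towards the peer's head and wait for the callback
	if err := tester.downloader.BeaconSync(FullSync, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
		t.Fatalf("demo sync failed: %v", err)
	}
	select {
	case <-done:
	case <-time.After(3 * time.Second):
		t.Fatal("demo sync timed out")
	}
}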
// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{
		dl:             dl,
		id:             id,
		chain:          newTestBlockchain(blocks),
		withholdBodies: make(map[common.Hash]struct{}),
	}
	dl.peers[id] = peer

	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
		panic(err)
	}
	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
		panic(err)
	}
	return peer
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.SnapSyncer.Unregister(id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl             *downloadTester
	withholdBodies map[common.Hash]struct{}
	id             string
	chain          *core.BlockChain
}

// Head retrieves the peer's current head hash.
func (dlp *downloadTesterPeer) Head() common.Hash {
	head := dlp.chain.CurrentBlock()
	return head.Hash()
}

func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
	var headers = make([]*types.Header, len(rlpdata))
	for i, data := range rlpdata {
		var h types.Header
		if err := rlp.DecodeBytes(data, &h); err != nil {
			panic(err)
		}
		headers[i] = &h
	}
	return headers
}

// RequestHeadersByHash constructs a GetBlockHeaders request based on a hashed
// origin, associated with a particular peer in the download tester. The
// response is delivered asynchronously on the provided sink channel.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *zondproto.Response) (*zondproto.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := zondproto.ServiceGetBlockHeadersQuery(dlp.chain, &zondproto.GetBlockHeadersRequest{
		Origin: zondproto.HashOrNumber{
			Hash: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &zondproto.Request{
		Peer: dlp.id,
	}
	res := &zondproto.Response{
		Req:  req,
		Res:  (*zondproto.BlockHeadersRequest)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}
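// For symmetry with unmarshalRlpHeaders above, a hedged sketch of the inverse
// helper (hypothetical, unused by the tests themselves): it RLP-encodes a
// header batch in the same raw-value form the live handler code serves.
func marshalRlpHeaders(headers []*types.Header) []rlp.RawValue {
	rlpdata := make([]rlp.RawValue, len(headers))
	for i, h := range headers {
		data, err := rlp.EncodeToBytes(h)
		if err != nil {
			panic(err)
		}
		rlpdata[i] = data
	}
	return rlpdata
}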
// RequestHeadersByNumber constructs a GetBlockHeaders request based on a
// numbered origin, associated with a particular peer in the download tester.
// The response is delivered asynchronously on the provided sink channel.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *zondproto.Response) (*zondproto.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := zondproto.ServiceGetBlockHeadersQuery(dlp.chain, &zondproto.GetBlockHeadersRequest{
		Origin: zondproto.HashOrNumber{
			Number: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &zondproto.Request{
		Peer: dlp.id,
	}
	res := &zondproto.Response{
		Req:  req,
		Res:  (*zondproto.BlockHeadersRequest)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestBodies constructs a GetBlockBodies request associated with a
// particular peer in the download tester, used to retrieve batches of block
// bodies from that peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *zondproto.Response) (*zondproto.Request, error) {
	blobs := zondproto.ServiceGetBlockBodiesQuery(dlp.chain, hashes)

	bodies := make([]*zondproto.BlockBody, len(blobs))
	for i, blob := range blobs {
		bodies[i] = new(zondproto.BlockBody)
		rlp.DecodeBytes(blob, bodies[i])
	}
	var (
		txsHashes        = make([]common.Hash, len(bodies))
		withdrawalHashes = make([]common.Hash, len(bodies))
	)
	hasher := trie.NewStackTrie(nil)
	for i, body := range bodies {
		hash := types.DeriveSha(types.Transactions(body.Transactions), hasher)
		if _, ok := dlp.withholdBodies[hash]; ok {
			txsHashes = append(txsHashes[:i], txsHashes[i+1:]...)
			continue
		}
		txsHashes[i] = hash

		hash = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
		withdrawalHashes[i] = hash
	}
	req := &zondproto.Request{
		Peer: dlp.id,
	}
	res := &zondproto.Response{
		Req:  req,
		Res:  (*zondproto.BlockBodiesResponse)(&bodies),
		Meta: [][]common.Hash{txsHashes, withdrawalHashes},
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}
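// A hedged convenience sketch (hypothetical, not part of the original suite)
// around the withholdBodies set consulted by RequestBodies above: marking
// hashes on a peer simulates one that serves headers but withholds the
// matching bodies, the same trick testSyncProgress uses further below.
func withholdAllBodies(peer *downloadTesterPeer, blocks []*types.Block) {
	for _, block := range blocks {
		peer.withholdBodies[block.Hash()] = struct{}{}
	}
}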
// RequestReceipts constructs a GetReceipts request associated with a
// particular peer in the download tester, used to retrieve batches of block
// receipts from that peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *zondproto.Response) (*zondproto.Request, error) {
	blobs := zondproto.ServiceGetReceiptsQuery(dlp.chain, hashes)

	receipts := make([][]*types.Receipt, len(blobs))
	for i, blob := range blobs {
		rlp.DecodeBytes(blob, &receipts[i])
	}
	hasher := trie.NewStackTrie(nil)
	hashes = make([]common.Hash, len(receipts))
	for i, receipt := range receipts {
		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
	}
	req := &zondproto.Request{
		Peer: dlp.id,
	}
	res := &zondproto.Response{
		Req:  req,
		Res:  (*zondproto.ReceiptsResponse)(&receipts),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// ID retrieves the peer's unique identifier.
func (dlp *downloadTesterPeer) ID() string {
	return dlp.id
}

// RequestAccountRange fetches a batch of accounts rooted in a specific account
// trie, starting with the origin.
func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetAccountRangePacket{
		ID:     id,
		Root:   root,
		Origin: origin,
		Limit:  limit,
		Bytes:  bytes,
	}
	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)

	// We need to convert to the non-slim format, delegate to the packet code
	res := &snap.AccountRangePacket{
		ID:       id,
		Accounts: slimaccs,
		Proof:    proofs,
	}
	hashes, accounts, _ := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
	return nil
}

// RequestStorageRanges fetches a batch of storage slots belonging to one or
// more accounts. If slots from only one account are requested, an origin
// marker may also be used to retrieve from a specific starting point.
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetStorageRangesPacket{
		ID:       id,
		Accounts: accounts,
		Root:     root,
		Origin:   origin,
		Limit:    limit,
		Bytes:    bytes,
	}
	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)

	// We need to demultiplex the response, delegate to the packet code
	res := &snap.StorageRangesPacket{
		ID:    id,
		Slots: storage,
		Proof: proofs,
	}
	hashes, slots := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
	return nil
}

// RequestByteCodes fetches a batch of bytecodes by hash.
func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	req := &snap.GetByteCodesPacket{
		ID:     id,
		Hashes: hashes,
		Bytes:  bytes,
	}
	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
	return nil
}

// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
// a specific state trie.
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
	req := &snap.GetTrieNodesPacket{
		ID:    id,
		Root:  root,
		Paths: paths,
		Bytes: bytes,
	}
	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
	return nil
}

// Log retrieves the peer's own contextual logger.
func (dlp *downloadTesterPeer) Log() log.Logger {
	return log.New("peer", dlp.id)
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	headers, blocks, receipts := length, length, length
	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, zondproto.ETH68, FullSync) }
func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, zondproto.ETH68, SnapSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	success := make(chan struct{})
	tester := newTesterWithNotification(t, func() {
		close(success)
	})
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
		t.Fatalf("failed to beacon-sync chain: %v", err)
	}
	select {
	case <-success:
		assertOwnChain(t, tester, len(chain.blocks))
	case <-time.NewTimer(time.Second * 3).C:
		t.Fatalf("Failed to sync chain in three seconds")
	}
}
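// The success-or-timeout select above recurs in almost every test below; a
// hedged helper capturing the pattern (hypothetical, the suite inlines it
// instead) would read:
func waitForSyncSuccess(t *testing.T, success chan struct{}) {
	t.Helper()
	select {
	case <-success:
	case <-time.After(3 * time.Second):
		t.Fatalf("Failed to sync chain in three seconds")
	}
}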
// Tests that if a large batch of blocks is being downloaded, it gets throttled
// until the cached blocks are retrieved.
func TestThrottling68Full(t *testing.T) { testThrottling(t, zondproto.ETH68, FullSync) }

func TestThrottling68Snap(t *testing.T) { testThrottling(t, zondproto.ETH68, SnapSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := len(testChainBase.blocks) - 1
	tester.newPeer("peer", protocol, testChainBase.blocks[1:])

	// Wrap the importer to allow stepping
	var blocked atomic.Uint32
	proceed := make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		blocked.Store(uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.downloader.BeaconSync(mode, testChainBase.blocks[len(testChainBase.blocks)-1].Header(), nil)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(blocked.Load())
				retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if blocked.Load() > 0 {
			blocked.Store(uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel68Full(t *testing.T) { testCancel(t, zondproto.ETH68, FullSync) }
func TestCancel68Snap(t *testing.T) { testCancel(t, zondproto.ETH68, SnapSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
	complete := make(chan struct{})
	success := func() {
		close(complete)
	}
	tester := newTesterWithNotification(t, success)
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	<-complete
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation68Full(t *testing.T) {
	testMultiProtoSync(t, zondproto.ETH68, FullSync)
}
func TestMultiProtoSynchronisation68Snap(t *testing.T) {
	testMultiProtoSync(t, zondproto.ETH68, SnapSync)
}

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	complete := make(chan struct{})
	success := func() {
		close(complete)
	}
	tester := newTesterWithNotification(t, success)
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type
	tester.newPeer("peer 68", zondproto.ETH68, chain.blocks[1:])

	if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
		t.Fatalf("failed to start beacon sync: %v", err)
	}
	select {
	case <-complete:
		break
	case <-time.NewTimer(time.Second * 3).C:
		t.Fatalf("Failed to sync chain in three seconds")
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Check that no peers have been dropped off
	for _, version := range []int{68} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, zondproto.ETH68, FullSync) }
func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, zondproto.ETH68, SnapSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	success := make(chan struct{})
	tester := newTesterWithNotification(t, func() {
		close(success)
	})
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Instrument the downloader to signal body requests
	var bodiesHave, receiptsHave atomic.Int32
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		bodiesHave.Add(int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		receiptsHave.Add(int32(len(headers)))
	}

	if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	select {
	case <-success:
		checkProgress(t, tester.downloader, "initial", zond.SyncProgress{
			HighestBlock: uint64(len(chain.blocks) - 1),
			CurrentBlock: uint64(len(chain.blocks) - 1),
		})
	case <-time.NewTimer(time.Second * 3).C:
		t.Fatalf("Failed to sync chain in three seconds")
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blocks[1:] {
		if len(block.Transactions()) > 0 {
			bodiesNeeded++
		}
	}
	for _, block := range chain.blocks[1:] {
		if mode == SnapSync && len(block.Transactions()) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave.Load()) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded)
	}
	if int(receiptsHave.Load()) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded)
	}
}

func checkProgress(t *testing.T, d *Downloader, stage string, want zond.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}

// Tests that beacon sync can be started from various local chain states:
// from genesis, from a short partial chain and from an almost fully
// synchronised local chain.
func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, zondproto.ETH68, FullSync) }
func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, zondproto.ETH68, SnapSync) }

func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
	var cases = []struct {
		name  string // The name of the testing scenario
		local int    // The length of the local chain (canonical chain assumed), 0 means genesis is the head
	}{
		{name: "Beacon sync since genesis", local: 0},
		{name: "Beacon sync with short local chain", local: 1},
		{name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
		{name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			success := make(chan struct{})
			tester := newTesterWithNotification(t, func() {
				close(success)
			})
			defer tester.terminate()

			chain := testChainBase.shorten(blockCacheMaxItems - 15)
			tester.newPeer("peer", protocol, chain.blocks[1:])

			// Build the local chain segment if it's required
			if c.local > 0 {
				tester.chain.InsertChain(chain.blocks[1 : c.local+1])
			}
			if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
				t.Fatalf("Failed to beacon sync chain %v %v", c.name, err)
			}
			select {
			case <-success:
				// Ok, downloader fully cancelled after sync cycle
				if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != len(chain.blocks) {
					t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
				}
			case <-time.NewTimer(time.Second * 3).C:
				t.Fatalf("Failed to sync chain in three seconds")
			}
		})
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, zondproto.ETH68, FullSync) }
func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, zondproto.ETH68, SnapSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	success := make(chan struct{})
	tester := newTesterWithNotification(t, func() {
		success <- struct{}{}
	})
	defer tester.terminate()
	checkProgress(t, tester.downloader, "pristine", zond.SyncProgress{})

	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	shortChain := chain.shorten(len(chain.blocks) / 2).blocks[1:]

	// Connect to a peer that provides all headers but only part of the bodies
	faultyPeer := tester.newPeer("peer-half", protocol, shortChain)
	for _, block := range shortChain {
		faultyPeer.withholdBodies[block.Hash()] = struct{}{}
	}

	if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)/2-1].Header(), nil); err != nil {
		t.Fatalf("failed to beacon-sync chain: %v", err)
	}
	select {
	case <-success:
		// Ok, downloader fully cancelled after sync cycle
		checkProgress(t, tester.downloader, "peer-half", zond.SyncProgress{
			CurrentBlock: uint64(len(chain.blocks)/2 - 1),
			HighestBlock: uint64(len(chain.blocks)/2 - 1),
		})
	case <-time.NewTimer(time.Second * 3).C:
		t.Fatalf("Failed to sync chain in three seconds")
	}

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain.blocks[1:])
	if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
		t.Fatalf("failed to beacon-sync chain: %v", err)
	}
	startingBlock := uint64(len(chain.blocks)/2 - 1)

	select {
	case <-success:
		// Ok, downloader fully cancelled after sync cycle
		checkProgress(t, tester.downloader, "peer-full", zond.SyncProgress{
			StartingBlock: startingBlock,
			CurrentBlock:  uint64(len(chain.blocks) - 1),
			HighestBlock:  uint64(len(chain.blocks) - 1),
		})
	case <-time.NewTimer(time.Second * 3).C:
		t.Fatalf("Failed to sync chain in three seconds")
	}
}