// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	freezer    string
	chain      *core.BlockChain
	downloader *Downloader

	peers map[string]*downloadTesterPeer
	lock  sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester(t *testing.T) *downloadTester {
	return newTesterWithNotification(t, nil)
}

// newTesterWithNotification creates a new downloader test mocker with a
// success callback that is invoked once a sync cycle completes.
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
	freezer := t.TempDir()
	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
	if err != nil {
		panic(err)
	}
	t.Cleanup(func() {
		db.Close()
	})
	gspec := core.Genesis{
		Alloc:   core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	gspec.MustCommit(db)

	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		panic(err)
	}
	tester := &downloadTester{
		freezer: freezer,
		chain:   chain,
		peers:   make(map[string]*downloadTesterPeer),
	}
	tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
	dl.chain.Stop()

	os.RemoveAll(dl.freezer)
}
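
// Most tests below drive the tester through the same lifecycle: construct it,
// register one or more peers serving pre-generated chains, synchronise, and
// assert on the resulting local chain. A minimal sketch of that pattern (it
// mirrors testCanonSync further down):
//
//	tester := newTester(t)
//	defer tester.terminate()
//
//	chain := testChainBase.shorten(blockCacheMaxItems - 15)
//	tester.newPeer("peer", eth.ETH66, chain.blocks[1:])
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}
//	assertOwnChain(t, tester, len(chain.blocks))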

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	head := dl.peers[id].chain.CurrentBlock()
	if td == nil {
		// If no particular TD was requested, load from the peer's blockchain
		td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64())
	}
	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{
		dl:              dl,
		id:              id,
		chain:           newTestBlockchain(blocks),
		withholdHeaders: make(map[common.Hash]struct{}),
	}
	dl.peers[id] = peer

	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
		panic(err)
	}
	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
		panic(err)
	}
	return peer
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.SnapSyncer.Unregister(id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	chain *core.BlockChain

	withholdHeaders map[common.Hash]struct{}
}
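
// Attack-style tests simulate a malicious peer by registering header hashes
// to withhold before syncing; the request handlers below silently drop any
// matching header from their responses. For example (as testMissingHeaderAttack
// does further down):
//
//	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
//	attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}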

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	head := dlp.chain.CurrentBlock()
	return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64())
}

func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
	var headers = make([]*types.Header, len(rlpdata))
	for i, data := range rlpdata {
		var h types.Header
		if err := rlp.DecodeBytes(data, &h); err != nil {
			panic(err)
		}
		headers[i] = &h
	}
	return headers
}

// RequestHeadersByHash constructs a GetBlockHeaders request based on a hashed
// origin, associated with a particular peer in the download tester. The
// returned function can be used to retrieve batches of headers from the
// particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Hash: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders request based on a
// numbered origin, associated with a particular peer in the download tester.
// The returned function can be used to retrieve batches of headers from the
// particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Number: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particular peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)

	bodies := make([]*eth.BlockBody, len(blobs))
	for i, blob := range blobs {
		bodies[i] = new(eth.BlockBody)
		rlp.DecodeBytes(blob, bodies[i])
	}
	var (
		txsHashes   = make([]common.Hash, len(bodies))
		uncleHashes = make([]common.Hash, len(bodies))
	)
	hasher := trie.NewStackTrie(nil)
	for i, body := range bodies {
		txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
		uncleHashes[i] = types.CalcUncleHash(body.Uncles)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockBodiesPacket)(&bodies),
		Meta: [][]common.Hash{txsHashes, uncleHashes},
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particular peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)

	receipts := make([][]*types.Receipt, len(blobs))
	for i, blob := range blobs {
		rlp.DecodeBytes(blob, &receipts[i])
	}
	hasher := trie.NewStackTrie(nil)
	hashes = make([]common.Hash, len(receipts))
	for i, receipt := range receipts {
		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.ReceiptsPacket)(&receipts),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// ID retrieves the peer's unique identifier.
func (dlp *downloadTesterPeer) ID() string {
	return dlp.id
}

// RequestAccountRange fetches a batch of accounts rooted in a specific account
// trie, starting with the origin.
func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetAccountRangePacket{
		ID:     id,
		Root:   root,
		Origin: origin,
		Limit:  limit,
		Bytes:  bytes,
	}
	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)

	// We need to convert to non-slim format, delegate to the packet code
	res := &snap.AccountRangePacket{
		ID:       id,
		Accounts: slimaccs,
		Proof:    proofs,
	}
	hashes, accounts, _ := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
	return nil
}

// RequestStorageRanges fetches a batch of storage slots belonging to one or
// more accounts. If slots from only one account are requested, an origin marker
// may also be used to retrieve from there.
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetStorageRangesPacket{
		ID:       id,
		Accounts: accounts,
		Root:     root,
		Origin:   origin,
		Limit:    limit,
		Bytes:    bytes,
	}
	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)

	// We need to demultiplex the response, delegate to the packet code
	res := &snap.StorageRangesPacket{
		ID:    id,
		Slots: storage,
		Proof: proofs,
	}
	hashes, slots := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
	return nil
}

// RequestByteCodes fetches a batch of bytecodes by hash.
func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	req := &snap.GetByteCodesPacket{
		ID:     id,
		Hashes: hashes,
		Bytes:  bytes,
	}
	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
	return nil
}

// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
// a specific state trie.
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
	req := &snap.GetTrieNodesPacket{
		ID:    id,
		Root:  root,
		Paths: paths,
		Bytes: bytes,
	}
	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
	return nil
}

// Log retrieves the peer's own contextual logger.
func (dlp *downloadTesterPeer) Log() log.Logger {
	return log.New("peer", dlp.id)
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	headers, blocks, receipts := length, length, length
	if tester.downloader.getMode() == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Snap(t *testing.T)  { testCanonSync(t, eth.ETH66, SnapSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
func TestCanonicalSynchronisation67Full(t *testing.T)  { testCanonSync(t, eth.ETH67, FullSync) }
func TestCanonicalSynchronisation67Snap(t *testing.T)  { testCanonSync(t, eth.ETH67, SnapSync) }
func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that if a large batch of blocks is being downloaded, it gets throttled
// until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }
func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := len(testChainBase.blocks) - 1
	tester.newPeer("peer", protocol, testChainBase.blocks[1:])

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
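
// Note on the settle conditions above: the loop accepts four states as
// properly throttled. Either the result cache is completely full, or it is
// full except for the reorg-protection header delay, or every target block is
// already accounted for between the local chain, the cache and the blocked
// importer (again with or without the delayed headers). Anything else after
// the grace period is reported as a block count mismatch.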

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Snap(t *testing.T)  { testForkedSync(t, eth.ETH66, SnapSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
func TestForkedSync67Full(t *testing.T)  { testForkedSync(t, eth.ETH67, FullSync) }
func TestForkedSync67Snap(t *testing.T)  { testForkedSync(t, eth.ETH67, SnapSync) }
func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	tester.newPeer("fork B", protocol, chainB.blocks[1:])
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainB.blocks))
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
func TestHeavyForkedSync67Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH67, FullSync) }
func TestHeavyForkedSync67Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
	chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
	tester.newPeer("light", protocol, chainA.blocks[1:])
	tester.newPeer("heavy", protocol, chainB.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainB.blocks))
}
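
// Both fork tests above branch off the shared testChainBase trunk, so the
// second sync cannot short circuit on the genesis and must locate the true
// fork point through the ancestor search before importing the other branch.
// The heavy variant additionally checks that a shorter branch with a higher
// total difficulty is adopted rather than dropped.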

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
func TestBoundedForkedSync67Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH67, FullSync) }
func TestBoundedForkedSync67Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA.blocks[1:])
	tester.newPeer("rewriter", protocol, chainB.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}
func TestBoundedHeavyForkedSync67Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
}
func TestBoundedHeavyForkedSync67Snap(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, SnapSync)
}
func TestBoundedHeavyForkedSync67Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, LightSync)
}

func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Snap(t *testing.T)  { testCancel(t, eth.ETH66, SnapSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
func TestCancel67Full(t *testing.T)  { testCancel(t, eth.ETH67, FullSync) }
func TestCancel67Snap(t *testing.T)  { testCancel(t, eth.ETH67, SnapSync) }
func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
func TestMultiSynchronisation67Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH67, FullSync) }
func TestMultiSynchronisation67Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:])
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, SnapSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
func TestMultiProtoSynchronisation67Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH67, FullSync) }
func TestMultiProtoSynchronisation67Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH67, SnapSync) }
func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type
	tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
	tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Check that no peers have been dropped off
	for _, version := range []int{66, 67} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
func TestEmptyShortCircuit67Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
func TestEmptyShortCircuit67Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blocks[1:] {
		if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
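
// The expected counters above follow directly from the short-circuit rule:
// a body is only requested for blocks that actually carry transactions or
// uncles, and receipts are only requested during snap sync for blocks with
// transactions. Header-only blocks are assembled into complete blocks locally
// without any extra round trips.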
	for _, block := range chain.blocks[1:] {
		if mode == SnapSync && len(block.Transactions()) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
func TestMissingHeaderAttack67Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
func TestMissingHeaderAttack67Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
func TestShiftedHeaderAttack67Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
func TestShiftedHeaderAttack67Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) }
func TestInvalidHeaderRollback67Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH67, SnapSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1

	fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

	blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
	blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be an attempt to leave the node with a bad
	// but already imported pivot block.
	withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])

	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < len(chain.blocks); i++ {
			withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}
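
// The rollback bounds asserted above follow from the header arithmetic: the
// fast attacker can deliver headers up to missing-1, i.e.
// fsHeaderSafetyNet+MaxHeaderFetch, and detecting the gap rolls the last
// fsHeaderSafetyNet of them back, leaving at most MaxHeaderFetch. The block
// attacker's gap sits at 3*fsHeaderSafetyNet+MaxHeaderFetch+1, so the same
// rollback leaves at most 2*fsHeaderSafetyNet+MaxHeaderFetch headers behind.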

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Snap(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
}
func TestHighTDStarvationAttack66Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}
func TestHighTDStarvationAttack67Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH67, FullSync)
}
func TestHighTDStarvationAttack67Snap(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH67, SnapSync)
}
func TestHighTDStarvationAttack67Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH67, LightSync)
}

func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain.blocks[1:])
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}
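
// The starvation attacker above advertises a huge total difficulty while its
// chain is shortened to a single block, so it can never deliver anything to
// back the claim up; the downloader is expected to give up with
// errStallingPeer instead of waiting indefinitely.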

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester(t)
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		tester.newPeer(id, protocol, chain.blocks[1:])
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Snap(t *testing.T)  { testSyncProgress(t, eth.ETH66, SnapSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
func TestSyncProgress67Full(t *testing.T)  { testSyncProgress(t, eth.ETH67, FullSync) }
func TestSyncProgress67Snap(t *testing.T)  { testSyncProgress(t, eth.ETH67, SnapSync) }
func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks)/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks)/2 - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks) - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}
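
// The progress tests above and below share one choreography: syncInitHook
// parks the downloader right after initialisation by signalling `starting`
// and then blocking on `progress`, which lets each test sample Progress() at
// a deterministic mid-sync point before releasing the downloader to finish
// the cycle.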

// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
func TestForkedSyncProgress67Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH67, FullSync) }
func TestForkedSyncProgress67Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chainA.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainA.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainB.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
func TestFailedSyncProgress67Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH67, FullSync) }
func TestFailedSyncProgress67Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	missing := len(chain.blocks)/2 - 1

	faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
	faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the
	// same after a failure
	tester.newPeer("valid", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(chain.blocks) - 1),
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
func TestFakedSyncProgress67Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH67, FullSync) }
func TestFakedSyncProgress67Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	numMissing := 5
	for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
		attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
	}
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(len(chain.blocks) - numMissing)
	tester.newPeer("valid", protocol, validChain.blocks[1:])
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(validChain.blocks) - 1),
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
}
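
// TestRemoteHeaderRequestSpan exercises calculateRequestSpan. As a worked
// example from the first case below: with the remote head at 1500 and the
// local head at 1000, the computed requests start at header 1323 and step by
// 16 (a span of 15 plus the requested header itself) twelve times, ending at
// 1499, one below the remote head.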
func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases that it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, SnapSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH66, LightSync)
}
func TestCheckpointEnforcement67Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH67, FullSync) }
func TestCheckpointEnforcement67Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH67, SnapSync) }
func TestCheckpointEnforcement67Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH67, LightSync)
}

func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
	// Create a new tester with a particular hard coded checkpoint block
	tester := newTester(t)
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain.blocks[1:])

	var expect error
	if mode == SnapSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == SnapSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, len(chain.blocks))
	}
}
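
// Note the mode split above: a full-syncing node is still allowed to pull the
// chain from a peer below the checkpoint, while snap and light sync reject
// such a peer with errUnsyncedPeer and end up keeping only the genesis block
// locally.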

// Tests that synchronising to a chain head announced by a trusted beacon
// client (beacon sync) works correctly for various local chain lengths.
func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) }
func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) }

func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	var cases = []struct {
		name  string // The name of testing scenario
		local int    // The length of local chain (canonical chain assumed), 0 means genesis is the head
	}{
		{name: "Beacon sync since genesis", local: 0},
		{name: "Beacon sync with short local chain", local: 1},
		{name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
		{name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			success := make(chan struct{})
			tester := newTesterWithNotification(t, func() {
				close(success)
			})
			defer tester.terminate()

			chain := testChainBase.shorten(blockCacheMaxItems - 15)
			tester.newPeer("peer", protocol, chain.blocks[1:])

			// Build the local chain segment if it's required
			if c.local > 0 {
				tester.chain.InsertChain(chain.blocks[1 : c.local+1])
			}
			if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header()); err != nil {
				t.Fatalf("Failed to beacon sync chain %v %v", c.name, err)
			}
			select {
			case <-success:
				// Ok, downloader fully cancelled after sync cycle
				if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != len(chain.blocks) {
					t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
				}
			case <-time.NewTimer(time.Second * 3).C:
				t.Fatalf("Failed to sync chain in three seconds")
			}
		})
	}
}