github.com/ethxdao/go-ethereum@v0.0.0-20221218102228-5ae34a9cc189/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	ethereum "github.com/ethxdao/go-ethereum" // provides the ethereum.SyncProgress type used by the progress tests below
	"github.com/ethxdao/go-ethereum/common"
	"github.com/ethxdao/go-ethereum/consensus/ethash"
	"github.com/ethxdao/go-ethereum/core"
	"github.com/ethxdao/go-ethereum/core/rawdb"
	"github.com/ethxdao/go-ethereum/core/types"
	"github.com/ethxdao/go-ethereum/core/vm"
	"github.com/ethxdao/go-ethereum/eth/protocols/eth"
	"github.com/ethxdao/go-ethereum/eth/protocols/snap"
	"github.com/ethxdao/go-ethereum/event"
	"github.com/ethxdao/go-ethereum/log"
	"github.com/ethxdao/go-ethereum/params"
	"github.com/ethxdao/go-ethereum/rlp"
	"github.com/ethxdao/go-ethereum/trie"
)

// downloadTester is a test simulator for mocking out a local blockchain.
type downloadTester struct {
	freezer    string
	chain      *core.BlockChain
	downloader *Downloader

	peers map[string]*downloadTesterPeer
	lock  sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester(t *testing.T) *downloadTester {
	return newTesterWithNotification(t, nil)
}

// newTesterWithNotification creates a new downloader test mocker, invoking the
// given callback upon successful sync completion.
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
	freezer := t.TempDir()
	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
	if err != nil {
		panic(err)
	}
	t.Cleanup(func() {
		db.Close()
	})
	gspec := core.Genesis{
		Alloc:   core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	gspec.MustCommit(db)

	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		panic(err)
	}
	tester := &downloadTester{
		freezer: freezer,
		chain:   chain,
		peers:   make(map[string]*downloadTesterPeer),
	}
	tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
	dl.chain.Stop()

	os.RemoveAll(dl.freezer)
}
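// The helpers above are the whole harness contract: construct a tester, attach
// peers, sync, terminate. As a minimal usage sketch (not part of the original
// suite; the function name is hypothetical), a test wires them together like
// this:
func testHarnessUsageSketch(t *testing.T) {
	tester := newTester(t)
	defer tester.terminate()

	// Serve a chain small enough to fit the download cache in one cycle
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", eth.ETH66, chain.blocks[1:])

	// Block until the sync cycle either completes or fails
	if err := tester.sync("peer", nil, FullSync); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}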
// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	head := dl.peers[id].chain.CurrentBlock()
	if td == nil {
		// If no particular TD was requested, load from the peer's blockchain
		td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64())
	}
	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{
		dl:              dl,
		id:              id,
		chain:           newTestBlockchain(blocks),
		withholdHeaders: make(map[common.Hash]struct{}),
	}
	dl.peers[id] = peer

	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
		panic(err)
	}
	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
		panic(err)
	}
	return peer
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.SnapSyncer.Unregister(id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	chain *core.BlockChain

	withholdHeaders map[common.Hash]struct{}
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	head := dlp.chain.CurrentBlock()
	return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64())
}

func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
	var headers = make([]*types.Header, len(rlpdata))
	for i, data := range rlpdata {
		var h types.Header
		if err := rlp.DecodeBytes(data, &h); err != nil {
			panic(err)
		}
		headers[i] = &h
	}
	return headers
}
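// The withholdHeaders set on downloadTesterPeer is how the attack tests below
// simulate a malicious peer: the header-serving methods filter out any header
// whose hash appears in the set before responding. Marking a block as withheld
// is a one-liner (hypothetical helper, same mechanism the tests use inline):
func markHeaderWithheldSketch(peer *downloadTesterPeer, block *types.Block) {
	// Every subsequent header response from this peer will silently omit
	// the block's header, leaving a gap for the downloader to detect.
	peer.withholdHeaders[block.Hash()] = struct{}{}
}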
// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Hash: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Number: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)

	bodies := make([]*eth.BlockBody, len(blobs))
	for i, blob := range blobs {
		bodies[i] = new(eth.BlockBody)
		rlp.DecodeBytes(blob, bodies[i])
	}
	var (
		txsHashes   = make([]common.Hash, len(bodies))
		uncleHashes = make([]common.Hash, len(bodies))
	)
	hasher := trie.NewStackTrie(nil)
	for i, body := range bodies {
		txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
		uncleHashes[i] = types.CalcUncleHash(body.Uncles)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockBodiesPacket)(&bodies),
		Meta: [][]common.Hash{txsHashes, uncleHashes},
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}
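// The Meta hashes computed in RequestBodies mirror what the downloader's queue
// uses to match delivered bodies against scheduled headers. As an illustrative
// sketch (hypothetical helper, built from the same primitives), a body is
// acceptable for a header exactly when both derived hashes line up:
func bodyMatchesHeaderSketch(header *types.Header, txs []*types.Transaction, uncles []*types.Header) bool {
	hasher := trie.NewStackTrie(nil)
	return types.DeriveSha(types.Transactions(txs), hasher) == header.TxHash &&
		types.CalcUncleHash(uncles) == header.UncleHash
}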
// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)

	receipts := make([][]*types.Receipt, len(blobs))
	for i, blob := range blobs {
		rlp.DecodeBytes(blob, &receipts[i])
	}
	hasher := trie.NewStackTrie(nil)
	hashes = make([]common.Hash, len(receipts))
	for i, receipt := range receipts {
		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.ReceiptsPacket)(&receipts),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	go func() {
		sink <- res
	}()
	return req, nil
}

// ID retrieves the peer's unique identifier.
func (dlp *downloadTesterPeer) ID() string {
	return dlp.id
}

// RequestAccountRange fetches a batch of accounts rooted in a specific account
// trie, starting with the origin.
func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetAccountRangePacket{
		ID:     id,
		Root:   root,
		Origin: origin,
		Limit:  limit,
		Bytes:  bytes,
	}
	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)

	// We need to convert to non-slim format, delegate to the packet code
	res := &snap.AccountRangePacket{
		ID:       id,
		Accounts: slimaccs,
		Proof:    proofs,
	}
	hashes, accounts, _ := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
	return nil
}

// RequestStorageRanges fetches a batch of storage slots belonging to one or
// more accounts. If slots from only one account are requested, an origin marker
// may also be used to retrieve from there.
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetStorageRangesPacket{
		ID:       id,
		Accounts: accounts,
		Root:     root,
		Origin:   origin,
		Limit:    limit,
		Bytes:    bytes,
	}
	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)

	// We need to demultiplex the reply, delegate to the packet code
	res := &snap.StorageRangesPacket{
		ID:    id,
		Slots: storage,
		Proof: proofs,
	}
	hashes, slots := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
	return nil
}

// RequestByteCodes fetches a batch of bytecodes by hash.
func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	req := &snap.GetByteCodesPacket{
		ID:     id,
		Hashes: hashes,
		Bytes:  bytes,
	}
	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
	return nil
}
// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
// a specific state trie.
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
	req := &snap.GetTrieNodesPacket{
		ID:    id,
		Root:  root,
		Paths: paths,
		Bytes: bytes,
	}
	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
	return nil
}

// Log retrieves the peer's own contextual logger.
func (dlp *downloadTesterPeer) Log() log.Logger {
	return log.New("peer", dlp.id)
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	headers, blocks, receipts := length, length, length
	if tester.downloader.getMode() == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Snap(t *testing.T)  { testCanonSync(t, eth.ETH66, SnapSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
func TestCanonicalSynchronisation67Full(t *testing.T)  { testCanonSync(t, eth.ETH67, FullSync) }
func TestCanonicalSynchronisation67Snap(t *testing.T)  { testCanonSync(t, eth.ETH67, SnapSync) }
func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}
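// A note on the length accounting used throughout: assertOwnChain receives
// item counts that include the genesis block, which is why head numbers are
// converted to lengths with a +1 above. As a standalone restatement
// (hypothetical helper name, same arithmetic):
func chainLengthSketch(headNumber uint64) int {
	return int(headNumber) + 1 // a head at block #N means N+1 items, genesis included
}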
// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }
func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := len(testChainBase.blocks) - 1
	tester.newPeer("peer", protocol, testChainBase.blocks[1:])

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
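// The test above drives the importer manually: chainInsertHook parks every
// result batch until the main loop releases it via the proceed channel.
// Reduced to its core, the stepping pattern looks like this (sketch with
// hypothetical names, same mechanism as used above):
func installSteppingHookSketch(d *Downloader) (blocked *uint32, proceed chan struct{}) {
	blocked, proceed = new(uint32), make(chan struct{})
	d.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(blocked, uint32(len(results))) // record how many results are parked
		<-proceed                                         // stall the import until released
	}
	return blocked, proceed
}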
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Snap(t *testing.T)  { testForkedSync(t, eth.ETH66, SnapSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
func TestForkedSync67Full(t *testing.T)  { testForkedSync(t, eth.ETH67, FullSync) }
func TestForkedSync67Snap(t *testing.T)  { testForkedSync(t, eth.ETH67, SnapSync) }
func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	tester.newPeer("fork B", protocol, chainB.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainB.blocks))
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
func TestHeavyForkedSync67Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH67, FullSync) }
func TestHeavyForkedSync67Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
	chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
	tester.newPeer("light", protocol, chainA.blocks[1:])
	tester.newPeer("heavy", protocol, chainB.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainB.blocks))
}
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
func TestBoundedForkedSync67Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH67, FullSync) }
func TestBoundedForkedSync67Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA.blocks[1:])
	tester.newPeer("rewriter", protocol, chainB.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}
func TestBoundedHeavyForkedSync67Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
}
func TestBoundedHeavyForkedSync67Snap(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, SnapSync)
}
func TestBoundedHeavyForkedSync67Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, LightSync)
}

func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA.blocks[1:])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chainA.blocks))

	tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}
// Tests that a canceled download wipes all previously accumulated state.
func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Snap(t *testing.T)  { testCancel(t, eth.ETH66, SnapSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
func TestCancel67Full(t *testing.T)  { testCancel(t, eth.ETH67, FullSync) }
func TestCancel67Snap(t *testing.T)  { testCancel(t, eth.ETH67, SnapSync) }
func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
func TestMultiSynchronisation67Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH67, FullSync) }
func TestMultiSynchronisation67Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(len(chain.blocks)/(i+1)).blocks[1:])
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}
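// Each peer in the test above serves a progressively shorter prefix of the
// same chain (peer #i holds len/(i+1) blocks), so only "peer #0" can satisfy
// the full sync while the rest exercise the multi-peer scheduling paths. The
// prefix sizes as a standalone computation (hypothetical helper):
func peerPrefixLengthsSketch(total, peers int) []int {
	lengths := make([]int, peers)
	for i := range lengths {
		lengths[i] = total / (i + 1) // peer #0 gets everything, later peers ever less
	}
	return lengths
}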
// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, SnapSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
func TestMultiProtoSynchronisation67Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH67, FullSync) }
func TestMultiProtoSynchronisation67Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH67, SnapSync) }
func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type
	tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
	tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Check that no peers have been dropped off
	for _, version := range []int{66, 67} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
func TestEmptyShortCircuit67Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
func TestEmptyShortCircuit67Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain.blocks[1:])

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blocks[1:] {
		if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, block := range chain.blocks[1:] {
		if mode == SnapSync && len(block.Transactions()) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}
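// The expected counts above follow two simple rules: bodies are only fetched
// for blocks that actually contain transactions or uncles (and never in light
// sync), while receipts are only fetched during snap sync for blocks with
// transactions. Stated as predicates (sketch, hypothetical helper names):
func needsBodySketch(mode SyncMode, block *types.Block) bool {
	return mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0)
}

func needsReceiptsSketch(mode SyncMode, block *types.Block) bool {
	return mode == SnapSync && len(block.Transactions()) > 0
}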
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
func TestMissingHeaderAttack67Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
func TestMissingHeaderAttack67Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
func TestShiftedHeaderAttack67Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
func TestShiftedHeaderAttack67Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) }
func TestInvalidHeaderRollback67Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH67, SnapSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1

	fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

	blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
	blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])

	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < len(chain.blocks); i++ {
			withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}
// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Snap(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
}
func TestHighTDStarvationAttack66Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}
func TestHighTDStarvationAttack67Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH67, FullSync)
}
func TestHighTDStarvationAttack67Snap(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH67, SnapSync)
}
func TestHighTDStarvationAttack67Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH67, LightSync)
}

func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain.blocks[1:])
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester(t)
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		tester.newPeer(id, protocol, chain.blocks[1:])
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Snap(t *testing.T)  { testSyncProgress(t, eth.ETH66, SnapSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
func TestSyncProgress67Full(t *testing.T)  { testSyncProgress(t, eth.ETH67, FullSync) }
func TestSyncProgress67Snap(t *testing.T)  { testSyncProgress(t, eth.ETH67, SnapSync) }
func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks)/2).blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks)/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks)/2 - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks) - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}
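// checkProgress compares the three block-number fields of ethereum.SyncProgress,
// the same structure geth ultimately serves to RPC clients via eth_syncing. A
// consumer needs nothing more than those fields to derive a completion ratio
// (illustrative sketch, hypothetical helper):
func syncRatioSketch(p ethereum.SyncProgress) float64 {
	if p.HighestBlock == 0 {
		return 1 // nothing known to sync against
	}
	return float64(p.CurrentBlock) / float64(p.HighestBlock)
}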
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
func TestForkedSyncProgress67Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH67, FullSync) }
func TestForkedSyncProgress67Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chainA.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainA.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainB.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})
}
// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
func TestFailedSyncProgress67Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH67, FullSync) }
func TestFailedSyncProgress67Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	missing := len(chain.blocks)/2 - 1

	faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
	faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the
	// same after a failure
	tester.newPeer("valid", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(chain.blocks) - 1),
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
}
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
func TestFakedSyncProgress67Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH67, FullSync) }
func TestFakedSyncProgress67Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	numMissing := 5
	for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
		attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
	}
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(len(chain.blocks) - numMissing)
	tester.newPeer("valid", protocol, validChain.blocks[1:])
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(validChain.blocks) - 1),
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edgecases that it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}
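// To make the expected values above concrete: each case expands a
// (from, count, span) triple returned by calculateRequestSpan into numbers
// spaced span+1 apart. For the first case (remote 1500, local 1000) that is
// from=1323 with span 15, i.e. one request every 16 blocks, ending exactly at
// the remote head 1499. Reproducing that expansion (illustrative sketch using
// the triple implied by the first test case):
func spanExpansionSketch() []int {
	from, count, span := 1323, 12, 15
	r := make([]int, 0, count)
	for num := from; len(r) < count; num += span + 1 {
		r = append(r, num) // 1323, 1339, ..., 1499
	}
	return r
}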
// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, SnapSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH66, LightSync)
}
func TestCheckpointEnforcement67Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH67, FullSync) }
func TestCheckpointEnforcement67Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH67, SnapSync) }
func TestCheckpointEnforcement67Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH67, LightSync)
}

func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
	// Create a new tester with a particular hard coded checkpoint block
	tester := newTester(t)
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain.blocks[1:])

	var expect error
	if mode == SnapSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == SnapSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, len(chain.blocks))
	}
}

// Tests that beacon sync, where the head to synchronise to is provided as a
// trusted header rather than taken from peer advertisements, completes
// correctly from various local chain heights.
func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) }
func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) }

func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	var cases = []struct {
		name  string // The name of testing scenario
		local int    // The length of local chain (canonical chain assumed), 0 means genesis is the head
	}{
		{name: "Beacon sync since genesis", local: 0},
		{name: "Beacon sync with short local chain", local: 1},
		{name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
		{name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			success := make(chan struct{})
			tester := newTesterWithNotification(t, func() {
				close(success)
			})
			defer tester.terminate()

			chain := testChainBase.shorten(blockCacheMaxItems - 15)
			tester.newPeer("peer", protocol, chain.blocks[1:])

			// Build the local chain segment if it's required
			if c.local > 0 {
				tester.chain.InsertChain(chain.blocks[1 : c.local+1])
			}
			if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header()); err != nil {
				t.Fatalf("Failed to beacon sync chain %v %v", c.name, err)
			}
			select {
			case <-success:
				// Ok, downloader fully cancelled after sync cycle
				if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != len(chain.blocks) {
					t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
				}
			case <-time.NewTimer(time.Second * 3).C:
				t.Fatalf("Failed to sync chain in three seconds")
			}
		})
	}
}
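// Unlike the legacy tests, beacon sync has no blocking entry point: BeaconSync
// returns immediately and completion is observed through the success callback
// wired into the tester at construction. The waiting pattern above, reduced to
// a reusable form (illustrative sketch, hypothetical helper):
func waitForBeaconSyncSketch(t *testing.T, success <-chan struct{}, timeout time.Duration) {
	t.Helper()
	select {
	case <-success:
		// sync cycle completed and the notification fired
	case <-time.After(timeout):
		t.Fatal("sync did not complete in time")
	}
}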