github.com/dominant-strategies/go-quai@v0.28.2/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
    "errors"
    "fmt"
    "math/big"
    "strings"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    quai "github.com/dominant-strategies/go-quai"
    "github.com/dominant-strategies/go-quai/common"
    "github.com/dominant-strategies/go-quai/core/rawdb"
    "github.com/dominant-strategies/go-quai/core/state/snapshot"
    "github.com/dominant-strategies/go-quai/core/types"
    "github.com/dominant-strategies/go-quai/eth/protocols/eth"
    "github.com/dominant-strategies/go-quai/ethdb"
    "github.com/dominant-strategies/go-quai/event"
    "github.com/dominant-strategies/go-quai/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
    fullMaxForkAncestry = 10000
    lightMaxForkAncestry = 10000
    blockCacheMaxItems = 1024
    fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
    downloader *Downloader

    genesis *types.Block   // Genesis blocks used by the tester and peers
    stateDb ethdb.Database // Database used by the tester for syncing from peers
    peerDb  ethdb.Database // Database of the peers containing all data
    peers   map[string]*downloadTesterPeer

    ownHashes  []common.Hash                 // Hash chain belonging to the tester
    ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester
    ownBlocks  map[common.Hash]*types.Block  // Blocks belonging to the tester
    ownChainTd map[common.Hash]*big.Int      // Total difficulties of the blocks in the local chain

    ancientHeaders map[common.Hash]*types.Header // Ancient headers belonging to the tester
    ancientBlocks  map[common.Hash]*types.Block  // Ancient blocks belonging to the tester
    ancientChainTd map[common.Hash]*big.Int      // Ancient total difficulties of the blocks in the local chain

    lock sync.RWMutex
}
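// Note: the downloadTester itself is handed to New below as the local chain,
// so it must satisfy the chain interfaces the downloader calls back into
// (HasHeader, GetBlockByHash, InsertHeaderChain, SetHead and so on), all
// backed by the in-memory maps above. A typical round-trip in these tests
// looks roughly like this (illustrative sketch, not a test in this file):
//
//	tester := newTester()
//	defer tester.terminate()
//	tester.newPeer("peer", eth.ETH66, testChainBase)
//	err := tester.sync("peer", nil, FullSync) // nil TD: use the peer's own head TD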
// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
    tester := &downloadTester{
        genesis:    testGenesis,
        peerDb:     testDB,
        peers:      make(map[string]*downloadTesterPeer),
        ownHashes:  []common.Hash{testGenesis.Hash()},
        ownHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
        ownBlocks:  map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
        ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

        // Initialize ancient store with test genesis block
        ancientHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
        ancientBlocks:  map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
        ancientChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    }
    tester.stateDb = rawdb.NewMemoryDatabase()
    tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

    tester.downloader = New(trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
    return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
    dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
    dl.lock.RLock()
    hash := dl.peers[id].chain.headBlock().Hash()
    // If no particular TD was requested, load from the peer's blockchain
    if td == nil {
        td = dl.peers[id].chain.td(hash)
    }
    dl.lock.RUnlock()

    // Synchronise with the chosen peer and ensure proper cleanup afterwards
    err := dl.downloader.synchronise(id, hash, td, mode)
    select {
    case <-dl.downloader.cancelCh:
        // Ok, downloader fully cancelled after sync cycle
    default:
        // Downloader is still accepting packets, can block a peer up
        panic("downloader active post sync cycle") // panic will be caught by tester
    }
    return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
    return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
    return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a fast-sync block is present in the tester's canonical
// chain. The tester never tracks fast blocks, so this always reports false.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    return false
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
    dl.lock.RLock()
    defer dl.lock.RUnlock()
    return dl.getHeaderByHash(hash)
}

// getHeaderByHash returns the header if found either within ancients or own headers.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
    header := dl.ancientHeaders[hash]
    if header != nil {
        return header
    }
    return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    block := dl.ancientBlocks[hash]
    if block != nil {
        return block
    }
    return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    for i := len(dl.ownHashes) - 1; i >= 0; i-- {
        if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
            return header
        }
        if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
            return header
        }
    }
    return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    for i := len(dl.ownHashes) - 1; i >= 0; i-- {
        if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
            // Ancient blocks are returned regardless of whether their state is present
            if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
                return block
            }
            return block
        }
        if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
            if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
                return block
            }
        }
    }
    return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    for i := len(dl.ownHashes) - 1; i >= 0; i-- {
        if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
            return block
        }
        if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
            return block
        }
    }
    return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
    // For now only check that the state trie is correct
    if block := dl.GetBlockByHash(hash); block != nil {
        _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
        return err
    }
    return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    return dl.getTd(hash)
}

// getTd retrieves the block's total difficulty if found either within
// ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
    if td := dl.ancientChainTd[hash]; td != nil {
        return td
    }
    return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
    dl.lock.Lock()
    defer dl.lock.Unlock()
    // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
    if dl.getHeaderByHash(headers[0].ParentHash()) == nil {
        return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number())
    }
    var hashes []common.Hash
    for i := 1; i < len(headers); i++ {
        hash := headers[i-1].Hash()
        if headers[i].ParentHash() != headers[i-1].Hash() {
            return i, fmt.Errorf("non-contiguous import at position %d", i)
        }
        hashes = append(hashes, hash)
    }
    hashes = append(hashes, headers[len(headers)-1].Hash())
    // Do a full insert if pre-checks passed
    for i, header := range headers {
        hash := hashes[i]
        if dl.getHeaderByHash(hash) != nil {
            continue
        }
        if dl.getHeaderByHash(header.ParentHash()) == nil {
            // This _should_ be impossible, due to precheck and induction
            return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
        }
        dl.ownHashes = append(dl.ownHashes, hash)
        dl.ownHeaders[hash] = header

        td := dl.getTd(header.ParentHash())
        dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty())
    }
    return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
    dl.lock.Lock()
    defer dl.lock.Unlock()
    for i, block := range blocks {
        if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
            return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
        } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
            return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
        }
        if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
            dl.ownHashes = append(dl.ownHashes, block.Hash())
            dl.ownHeaders[block.Hash()] = block.Header()
        }
        dl.ownBlocks[block.Hash()] = block
        dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
        td := dl.getTd(block.ParentHash())
        dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
    }
    return len(blocks), nil
}
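// Note: both insert paths above extend ownHashes and accumulate total
// difficulty as parent TD + header difficulty, a miniature version of what the
// real blockchain does; GetTd then serves these sums back to the downloader
// when it compares sync targets. The dummy 0x00 value written under each
// block's state root is what stands in for "state is present" in these tests.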
// SetHead rewinds the local chain to a new head.
func (dl *downloadTester) SetHead(head uint64) error {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    // Find the hash of the head to reset to
    var hash common.Hash
    for h, header := range dl.ownHeaders {
        if header.Number().Uint64() == head {
            hash = h
        }
    }
    for h, header := range dl.ancientHeaders {
        if header.Number().Uint64() == head {
            hash = h
        }
    }
    if hash == (common.Hash{}) {
        return fmt.Errorf("unknown head to set: %d", head)
    }
    // Find the offset in the header chain
    var offset int
    for o, h := range dl.ownHashes {
        if h == hash {
            offset = o
            break
        }
    }
    // Remove all the hashes and associated data afterwards
    for i := offset + 1; i < len(dl.ownHashes); i++ {
        delete(dl.ownChainTd, dl.ownHashes[i])
        delete(dl.ownHeaders, dl.ownHashes[i])
        delete(dl.ownBlocks, dl.ownHashes[i])

        delete(dl.ancientChainTd, dl.ownHashes[i])
        delete(dl.ancientHeaders, dl.ownHashes[i])
        delete(dl.ancientBlocks, dl.ownHashes[i])
    }
    dl.ownHashes = dl.ownHashes[:offset+1]
    return nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
    dl.peers[id] = peer
    return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    delete(dl.peers, id)
    dl.downloader.UnregisterPeer(id)
}

// Snapshots implements the BlockChain interface for the downloader, but is a noop.
func (dl *downloadTester) Snapshots() *snapshot.Tree {
    return nil
}

type downloadTesterPeer struct {
    dl            *downloadTester
    id            string
    chain         *testChain
    missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
    b := dlp.chain.headBlock()
    return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders request based on a hashed
// origin, associated with a particular peer in the download tester. The
// resulting batch of headers is delivered asynchronously to the downloader.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
    result := dlp.chain.headersByHash(origin, amount, skip, reverse)
    go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
    return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders request based on a numbered
// origin, associated with a particular peer in the download tester. The
// resulting batch of headers is delivered asynchronously to the downloader.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
    result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
    go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
    return nil
}

// RequestBodies constructs a getBlockBodies request associated with a particular
// peer in the download tester, delivering batches of block bodies from the
// requested peer back to the downloader.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
    txs, uncles := dlp.chain.bodies(hashes)
    go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
    return nil
}

// RequestNodeData constructs a getNodeData request associated with a particular
// peer in the download tester, delivering batches of node state data from the
// requested peer back to the downloader.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
    dlp.dl.lock.RLock()
    defer dlp.dl.lock.RUnlock()

    results := make([][]byte, 0, len(hashes))
    for _, hash := range hashes {
        if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
            if !dlp.missingStates[hash] {
                results = append(results, data)
            }
        }
    }
    go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
    return nil
}
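// Note: every Request* method above answers through a `go ...Deliver*` call,
// mimicking a network round-trip with zero latency: the request returns
// immediately and the response arrives on a separate goroutine, much as a real
// peer's reply would. Tests that need a misbehaving peer (flooding, stalling)
// wrap or starve these delivery paths rather than touching the downloader
// internals directly.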
// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
    // Mark this method as a helper to report errors at callsite, not in here
    t.Helper()

    assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
    // Mark this method as a helper to report errors at callsite, not in here
    t.Helper()

    // Initialize the counters for the first fork
    headers, blocks := lengths[0], lengths[0]

    // Update the counters for each subsequent fork
    for _, length := range lengths[1:] {
        headers += length - common
        blocks += length - common
    }
    if tester.downloader.getMode() == LightSync {
        blocks = 1
    }
    if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
        t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
    }
    if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
        t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
    }
}

func TestCanonicalSynchronisation65Full(t *testing.T)  { testCanonSync(t, eth.ETH65, FullSync) }
func TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonSync(t, eth.ETH65, FastSync) }
func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, eth.ETH65, LightSync) }

func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a small enough block chain to download
    chain := testChainBase.shorten(blockCacheMaxItems - 15)
    tester.newPeer("peer", protocol, chain)

    // Synchronise with the peer and make sure all relevant data was retrieved
    if err := tester.sync("peer", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks is being downloaded, it gets throttled
// until the cached blocks are retrieved.
func TestThrottling65Full(t *testing.T) { testThrottling(t, eth.ETH65, FullSync) }
func TestThrottling65Fast(t *testing.T) { testThrottling(t, eth.ETH65, FastSync) }

func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()
    tester := newTester()

    // Create a long block chain to download and the tester
    targetBlocks := testChainBase.len() - 1
    tester.newPeer("peer", protocol, testChainBase)

    // Wrap the importer to allow stepping
    blocked, proceed := uint32(0), make(chan struct{})
    tester.downloader.chainInsertHook = func(results []*fetchResult) {
        atomic.StoreUint32(&blocked, uint32(len(results)))
        <-proceed
    }
    // Start a synchronisation concurrently
    errc := make(chan error, 1)
    go func() {
        errc <- tester.sync("peer", nil, mode)
    }()
    // Iteratively take some blocks, always checking the retrieval count
    for {
        // Check the retrieval count synchronously (the reason for this ugly block)
        tester.lock.RLock()
        retrieved := len(tester.ownBlocks)
        tester.lock.RUnlock()
        if retrieved >= targetBlocks+1 {
            break
        }
        // Wait a bit for sync to throttle itself
        var cached, frozen int
        for start := time.Now(); time.Since(start) < 3*time.Second; {
            time.Sleep(25 * time.Millisecond)

            tester.lock.Lock()
            tester.downloader.queue.lock.Lock()
            tester.downloader.queue.resultCache.lock.Lock()
            {
                cached = tester.downloader.queue.resultCache.countCompleted()
                frozen = int(atomic.LoadUint32(&blocked))
                retrieved = len(tester.ownBlocks)
            }
            tester.downloader.queue.resultCache.lock.Unlock()
            tester.downloader.queue.lock.Unlock()
            tester.lock.Unlock()

            if cached == blockCacheMaxItems ||
                cached == blockCacheMaxItems-reorgProtHeaderDelay ||
                retrieved+cached+frozen == targetBlocks+1 ||
                retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
                break
            }
        }
        // Make sure we filled up the cache, then exhaust it
        time.Sleep(25 * time.Millisecond) // give it a chance to screw up
        tester.lock.RLock()
        retrieved = len(tester.ownBlocks)
        tester.lock.RUnlock()
        if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
            t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
        }

        // Permit the blocked blocks to import
        if atomic.LoadUint32(&blocked) > 0 {
            atomic.StoreUint32(&blocked, uint32(0))
            proceed <- struct{}{}
        }
    }
    // Check that we haven't pulled more blocks than available
    assertOwnChain(t, tester, targetBlocks+1)
    if err := <-errc; err != nil {
        t.Fatalf("block synchronization failed: %v", err)
    }
    tester.terminate()
}
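// Note on the mechanism above: chainInsertHook fires just before a batch of
// fetch results is imported, so parking it on the unbuffered `proceed` channel
// freezes the importer, with `blocked` recording the batch size. The test can
// then observe how full the result cache is allowed to get (the throttling
// under test) before granting the importer one step of progress.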
// Tests that simple synchronisation against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, eth.ETH65, FullSync) }
func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, eth.ETH65, FastSync) }
func TestForkedSync65Light(t *testing.T) { testForkedSync(t, eth.ETH65, LightSync) }

func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
    chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
    tester.newPeer("fork A", protocol, chainA)
    tester.newPeer("fork B", protocol, chainB)
    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("fork A", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    // Synchronise with the second peer and make sure that fork is pulled too
    if err := tester.sync("fork B", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FullSync) }
func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FastSync) }
func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, LightSync) }

func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
    chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
    tester.newPeer("light", protocol, chainA)
    tester.newPeer("heavy", protocol, chainB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("light", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    // Synchronise with the second peer and make sure that fork is pulled too
    if err := tester.sync("heavy", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FullSync) }
func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FastSync) }
func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, LightSync) }

func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chainA := testChainForkLightA
    chainB := testChainForkLightB
    tester.newPeer("original", protocol, chainA)
    tester.newPeer("rewriter", protocol, chainB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("original", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    // Synchronise with the second peer and ensure that the fork is rejected for being too old
    if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
        t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
    }
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync65Full(t *testing.T) {
    testBoundedHeavyForkedSync(t, eth.ETH65, FullSync)
}
func TestBoundedHeavyForkedSync65Fast(t *testing.T) {
    testBoundedHeavyForkedSync(t, eth.ETH65, FastSync)
}
func TestBoundedHeavyForkedSync65Light(t *testing.T) {
    testBoundedHeavyForkedSync(t, eth.ETH65, LightSync)
}

func TestBoundedHeavyForkedSync66Full(t *testing.T) {
    testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
    testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
    testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}

func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()
    tester := newTester()

    // Create a long enough forked chain
    chainA := testChainForkLightA
    chainB := testChainForkHeavy
    tester.newPeer("original", protocol, chainA)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("original", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    tester.newPeer("heavy-rewriter", protocol, chainB)
    // Synchronise with the second peer and ensure that the fork is rejected for being too old
    if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
        t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
    }
    tester.terminate()
}
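// Note: the "too old" rejection in the two bounded-fork tests above appears to
// come from the fork-ancestry limits (fullMaxForkAncestry / lightMaxForkAncestry,
// lowered in init at the top of this file): when the common ancestor found
// during the lookup lies further behind the local head than the limit allows,
// the sync aborts with errInvalidAncestor instead of importing the stale fork.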
// Tests that an inactive downloader will not accept incoming block headers
// and bodies.
func TestInactiveDownloader63(t *testing.T) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Check that neither block headers nor bodies are accepted
    if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
    }
    if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
    }
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel65Full(t *testing.T)  { testCancel(t, eth.ETH65, FullSync) }
func TestCancel65Fast(t *testing.T)  { testCancel(t, eth.ETH65, FastSync) }
func TestCancel65Light(t *testing.T) { testCancel(t, eth.ETH65, LightSync) }

func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Fast(t *testing.T)  { testCancel(t, eth.ETH66, FastSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chain := testChainBase.shorten(MaxHeaderFetch)
    tester.newPeer("peer", protocol, chain)

    // Make sure canceling works with a pristine downloader
    tester.downloader.Cancel()
    if !tester.downloader.queue.Idle() {
        t.Errorf("download queue not idle")
    }
    // Synchronise with the peer, but cancel afterwards
    if err := tester.sync("peer", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    tester.downloader.Cancel()
    if !tester.downloader.queue.Idle() {
        t.Errorf("download queue not idle")
    }
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FullSync) }
func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FastSync) }
func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, LightSync) }

func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FastSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create various peers with various parts of the chain
    targetPeers := 8
    chain := testChainBase.shorten(targetPeers * 100)

    for i := 0; i < targetPeers; i++ {
        id := fmt.Sprintf("peer #%d", i)
        tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
    }
    if err := tester.sync("peer #0", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FullSync) }
func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FastSync) }
func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, eth.ETH65, LightSync) }

func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a small enough block chain to download
    chain := testChainBase.shorten(blockCacheMaxItems - 15)

    // Create peers of every type
    tester.newPeer("peer 65", eth.ETH65, chain)
    tester.newPeer("peer 66", eth.ETH66, chain)

    // Synchronise with the requested peer and make sure all blocks were retrieved
    if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())

    // Check that no peers have been dropped off
    for _, version := range []int{65, 66} {
        peer := fmt.Sprintf("peer %d", version)
        if _, ok := tester.peers[peer]; !ok {
            t.Errorf("%s dropped", peer)
        }
    }
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FullSync) }
func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FastSync) }
func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, LightSync) }

func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a block chain to download
    chain := testChainBase
    tester.newPeer("peer", protocol, chain)

    // Instrument the downloader to signal body requests
    bodiesHave := int32(0)
    tester.downloader.bodyFetchHook = func(headers []*types.Header) {
        atomic.AddInt32(&bodiesHave, int32(len(headers)))
    }
    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("peer", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())

    // Validate the number of block bodies that should have been requested
    bodiesNeeded := 0
    for _, block := range chain.blockm {
        if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
            bodiesNeeded++
        }
    }
    if int(bodiesHave) != bodiesNeeded {
        t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
    }
}
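// Note: bodyFetchHook above observes every header batch the downloader
// schedules for body retrieval, so the tally in bodiesHave is the number of
// bodies actually requested over the wire. Comparing it against the count of
// non-empty blocks shows that empty blocks were assembled from their headers
// alone, without a redundant body round-trip.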
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FullSync) }
func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FastSync) }
func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, LightSync) }

func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chain := testChainBase.shorten(blockCacheMaxItems - 15)
    brokenChain := chain.shorten(chain.len())
    delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
    tester.newPeer("attack", protocol, brokenChain)

    if err := tester.sync("attack", nil, mode); err == nil {
        t.Fatalf("succeeded attacker synchronisation")
    }
    // Synchronise with the valid peer and make sure sync succeeds
    tester.newPeer("valid", protocol, chain)
    if err := tester.sync("valid", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FullSync) }
func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FastSync) }
func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, LightSync) }

func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chain := testChainBase.shorten(blockCacheMaxItems - 15)

    // Attempt a full sync with an attacker feeding shifted headers
    brokenChain := chain.shorten(chain.len())
    delete(brokenChain.headerm, brokenChain.chain[1])
    delete(brokenChain.blockm, brokenChain.chain[1])
    tester.newPeer("attack", protocol, brokenChain)
    if err := tester.sync("attack", nil, mode); err == nil {
        t.Fatalf("succeeded attacker synchronisation")
    }

    // Synchronise with the valid peer and make sure sync succeeds
    tester.newPeer("valid", protocol, chain)
    if err := tester.sync("valid", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH65, FastSync) }
func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()

    // Create a small enough block chain to download
    targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
    chain := testChainBase.shorten(targetBlocks)

    // Attempt to sync with an attacker that feeds junk during the fast sync phase.
    // This should result in the last fsHeaderSafetyNet headers being rolled back.
    missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
    fastAttackChain := chain.shorten(chain.len())
    delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
    tester.newPeer("fast-attack", protocol, fastAttackChain)

    if err := tester.sync("fast-attack", nil, mode); err == nil {
        t.Fatalf("succeeded fast attacker synchronisation")
    }
    if head := tester.CurrentHeader().Number().Int64(); int(head) > MaxHeaderFetch {
        t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
    }

    // Attempt to sync with an attacker that feeds junk during the block import phase.
    // This should result in both the last fsHeaderSafetyNet number of headers being
    // rolled back, and also the pivot point being reverted to a non-block status.
    missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
    blockAttackChain := chain.shorten(chain.len())
    delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
    delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
    tester.newPeer("block-attack", protocol, blockAttackChain)

    if err := tester.sync("block-attack", nil, mode); err == nil {
        t.Fatalf("succeeded block attacker synchronisation")
    }
    if head := tester.CurrentHeader().Number().Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
        t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
    }
    if mode == FastSync {
        if head := tester.CurrentBlock().NumberU64(); head != 0 {
            t.Errorf("fast sync pivot block #%d not rolled back", head)
        }
    }

    // Attempt to sync with an attacker that withholds promised blocks after the
    // fast sync pivot point. This could be a trial to leave the node with a bad
    // but already imported pivot block.
    withholdAttackChain := chain.shorten(chain.len())
    tester.newPeer("withhold-attack", protocol, withholdAttackChain)
    tester.downloader.syncInitHook = func(uint64, uint64) {
        for i := missing; i < withholdAttackChain.len(); i++ {
            delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
        }
        tester.downloader.syncInitHook = nil
    }
    if err := tester.sync("withhold-attack", nil, mode); err == nil {
        t.Fatalf("succeeded withholding attacker synchronisation")
    }
    if head := tester.CurrentHeader().Number().Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
        t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
    }
    if mode == FastSync {
        if head := tester.CurrentBlock().NumberU64(); head != 0 {
            t.Errorf("fast sync pivot block #%d not rolled back", head)
        }
    }

    // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
    // should also disable fast syncing for this process, verify that we did a fresh full
    // sync. Note, we can't assert anything about the receipts since we won't purge the
    // database of them, hence we can't use assertOwnChain.
    tester.newPeer("valid", protocol, chain)
    if err := tester.sync("valid", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    if hs := len(tester.ownHeaders); hs != chain.len() {
        t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
    }
    if mode != LightSync {
        if bs := len(tester.ownBlocks); bs != chain.len() {
            t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
        }
    }
    tester.terminate()
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack65Full(t *testing.T) {
    testHighTDStarvationAttack(t, eth.ETH65, FullSync)
}
func TestHighTDStarvationAttack65Fast(t *testing.T) {
    testHighTDStarvationAttack(t, eth.ETH65, FastSync)
}
func TestHighTDStarvationAttack65Light(t *testing.T) {
    testHighTDStarvationAttack(t, eth.ETH65, LightSync)
}

func TestHighTDStarvationAttack66Full(t *testing.T) {
    testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Fast(t *testing.T) {
    testHighTDStarvationAttack(t, eth.ETH66, FastSync)
}
func TestHighTDStarvationAttack66Light(t *testing.T) {
    testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}

func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()

    chain := testChainBase.shorten(1)
    tester.newPeer("attack", protocol, chain)
    if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
        t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
    }
    tester.terminate()
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH65) }
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
    t.Parallel()

    // Define the disconnection requirement for individual hash fetch errors
    tests := []struct {
        result error
        drop   bool
    }{
        {nil, false},                        // Sync succeeded, all is well
        {errBusy, false},                    // Sync is already in progress, no problem
        {errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
        {errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
        {errStallingPeer, true},             // Peer was detected to be stalling, drop it
        {errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
        {errNoPeers, false},                 // No peers to download from, soft race, no issue
        {errTimeout, true},                  // No hashes received in due time, drop the peer
        {errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
        {errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
        {errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
        {errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
        {errInvalidBody, false},             // A bad peer was detected, but not the sync origin
        {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
    }
    // Run the tests and check disconnection status
    tester := newTester()
    defer tester.terminate()
    chain := testChainBase.shorten(1)

    for i, tt := range tests {
        // Register a new peer and ensure its presence
        id := fmt.Sprintf("test %d", i)
        if err := tester.newPeer(id, protocol, chain); err != nil {
            t.Fatalf("test %d: failed to register new peer: %v", i, err)
        }
        if _, ok := tester.peers[id]; !ok {
            t.Fatalf("test %d: registered peer not found", i)
        }
        // Simulate a synchronisation and check the required result
        tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

        tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
        if _, ok := tester.peers[id]; !ok != tt.drop {
            t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
        }
    }
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, eth.ETH65, FullSync) }
func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, eth.ETH65, FastSync) }
func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, eth.ETH65, LightSync) }

func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, eth.ETH66, FastSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()
    chain := testChainBase.shorten(blockCacheMaxItems - 15)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})
    progress := make(chan struct{})

    tester.downloader.syncInitHook = func(origin, latest uint64) {
        starting <- struct{}{}
        <-progress
    }
    checkProgress(t, tester.downloader, "pristine", quai.SyncProgress{})

    // Synchronise half the blocks and check initial progress
    tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
    pending := new(sync.WaitGroup)
    pending.Add(1)

    go func() {
        defer pending.Done()
        if err := tester.sync("peer-half", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "initial", quai.SyncProgress{
        HighestBlock: uint64(chain.len()/2 - 1),
    })
    progress <- struct{}{}
    pending.Wait()

    // Synchronise all the blocks and check continuation progress
    tester.newPeer("peer-full", protocol, chain)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("peer-full", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "completing", quai.SyncProgress{
        StartingBlock: uint64(chain.len()/2 - 1),
        CurrentBlock:  uint64(chain.len()/2 - 1),
        HighestBlock:  uint64(chain.len() - 1),
    })

    // Check final progress after successful sync
    progress <- struct{}{}
    pending.Wait()
    checkProgress(t, tester.downloader, "final", quai.SyncProgress{
        StartingBlock: uint64(chain.len()/2 - 1),
        CurrentBlock:  uint64(chain.len() - 1),
        HighestBlock:  uint64(chain.len() - 1),
    })
}

func checkProgress(t *testing.T, d *Downloader, stage string, want quai.SyncProgress) {
    // Mark this method as a helper to report errors at callsite, not in here
    t.Helper()

    p := d.Progress()
    p.KnownStates, p.PulledStates = 0, 0
    want.KnownStates, want.PulledStates = 0, 0
    if p != want {
        t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
    }
}
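// Note: checkProgress zeroes KnownStates and PulledStates on both sides of the
// comparison, presumably because the state-sync counters depend on scheduling
// and are not deterministic across runs; only the block-number fields of
// quai.SyncProgress are asserted. The syncInitHook + starting/progress channel
// pair used throughout the progress tests freezes the downloader right after
// it has chosen its origin and target, which is the one moment the "initial"
// snapshot can be observed reliably.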
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FullSync) }
func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FastSync) }
func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, LightSync) }

func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()
    chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
    chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})
    progress := make(chan struct{})

    tester.downloader.syncInitHook = func(origin, latest uint64) {
        starting <- struct{}{}
        <-progress
    }
    checkProgress(t, tester.downloader, "pristine", quai.SyncProgress{})

    // Synchronise with one of the forks and check progress
    tester.newPeer("fork A", protocol, chainA)
    pending := new(sync.WaitGroup)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("fork A", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting

    checkProgress(t, tester.downloader, "initial", quai.SyncProgress{
        HighestBlock: uint64(chainA.len() - 1),
    })
    progress <- struct{}{}
    pending.Wait()

    // Simulate a successful sync above the fork
    tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

    // Synchronise with the second fork and check progress resets
    tester.newPeer("fork B", protocol, chainB)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("fork B", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "forking", quai.SyncProgress{
        StartingBlock: uint64(testChainBase.len()) - 1,
        CurrentBlock:  uint64(chainA.len() - 1),
        HighestBlock:  uint64(chainB.len() - 1),
    })

    // Check final progress after successful sync
    progress <- struct{}{}
    pending.Wait()
    checkProgress(t, tester.downloader, "final", quai.SyncProgress{
        StartingBlock: uint64(testChainBase.len()) - 1,
        CurrentBlock:  uint64(chainB.len() - 1),
        HighestBlock:  uint64(chainB.len() - 1),
    })
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FullSync) }
func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FastSync) }
func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, LightSync) }

func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()
    chain := testChainBase.shorten(blockCacheMaxItems - 15)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})
    progress := make(chan struct{})

    tester.downloader.syncInitHook = func(origin, latest uint64) {
        starting <- struct{}{}
        <-progress
    }
    checkProgress(t, tester.downloader, "pristine", quai.SyncProgress{})

    // Attempt a full sync with a faulty peer
    brokenChain := chain.shorten(chain.len())
    missing := brokenChain.len() / 2
    delete(brokenChain.headerm, brokenChain.chain[missing])
    delete(brokenChain.blockm, brokenChain.chain[missing])
    tester.newPeer("faulty", protocol, brokenChain)

    pending := new(sync.WaitGroup)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("faulty", nil, mode); err == nil {
            panic("succeeded faulty synchronisation")
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "initial", quai.SyncProgress{
        HighestBlock: uint64(brokenChain.len() - 1),
    })
    progress <- struct{}{}
    pending.Wait()
    afterFailedSync := tester.downloader.Progress()

    // Synchronise with a good peer and check that the progress origin remains the
    // same after a failure
    tester.newPeer("valid", protocol, chain)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("valid", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "completing", afterFailedSync)

    // Check final progress after successful sync
    progress <- struct{}{}
    pending.Wait()
    checkProgress(t, tester.downloader, "final", quai.SyncProgress{
        CurrentBlock: uint64(chain.len() - 1),
        HighestBlock: uint64(chain.len() - 1),
    })
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
1364 // Tests that if an attacker fakes a chain height, after the attack is detected,
1365 // the progress height is successfully reduced at the next sync invocation.
1366 func TestFakedSyncProgress65Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, FullSync) }
1367 func TestFakedSyncProgress65Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, FastSync) }
1368 func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, LightSync) }
1369
1370 func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) }
1371 func TestFakedSyncProgress66Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FastSync) }
1372 func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
1373
1374 func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
1375 	t.Parallel()
1376
1377 	tester := newTester()
1378 	defer tester.terminate()
1379 	chain := testChainBase.shorten(blockCacheMaxItems - 15)
1380
1381 	// Set a sync init hook to catch progress changes
1382 	starting := make(chan struct{})
1383 	progress := make(chan struct{})
1384 	tester.downloader.syncInitHook = func(origin, latest uint64) {
1385 		starting <- struct{}{}
1386 		<-progress
1387 	}
1388 	checkProgress(t, tester.downloader, "pristine", quai.SyncProgress{})
1389
1390 	// Create and sync with an attacker that promises a higher chain than available.
1391 	brokenChain := chain.shorten(chain.len())
1392 	numMissing := 5
1393 	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
1394 		delete(brokenChain.headerm, brokenChain.chain[i])
1395 	}
1396 	tester.newPeer("attack", protocol, brokenChain)
1397
1398 	pending := new(sync.WaitGroup)
1399 	pending.Add(1)
1400 	go func() {
1401 		defer pending.Done()
1402 		if err := tester.sync("attack", nil, mode); err == nil {
1403 			panic("succeeded attacker synchronisation")
1404 		}
1405 	}()
1406 	<-starting
1407 	checkProgress(t, tester.downloader, "initial", quai.SyncProgress{
1408 		HighestBlock: uint64(brokenChain.len() - 1),
1409 	})
1410 	progress <- struct{}{}
1411 	pending.Wait()
1412 	afterFailedSync := tester.downloader.Progress()
1413
1414 	// Synchronise with a good peer and check that the progress height has been reduced to
1415 	// the true value.
1416 	validChain := chain.shorten(chain.len() - numMissing)
1417 	tester.newPeer("valid", protocol, validChain)
1418 	pending.Add(1)
1419
1420 	go func() {
1421 		defer pending.Done()
1422 		if err := tester.sync("valid", nil, mode); err != nil {
1423 			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
1424 		}
1425 	}()
1426 	<-starting
1427 	checkProgress(t, tester.downloader, "completing", quai.SyncProgress{
1428 		CurrentBlock: afterFailedSync.CurrentBlock,
1429 		HighestBlock: uint64(validChain.len() - 1),
1430 	})
1431
1432 	// Check final progress after successful sync.
1433 	progress <- struct{}{}
1434 	pending.Wait()
1435 	checkProgress(t, tester.downloader, "final", quai.SyncProgress{
1436 		CurrentBlock: uint64(validChain.len() - 1),
1437 		HighestBlock: uint64(validChain.len() - 1),
1438 	})
1439 }
1440
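// The attacker above keeps its advertised head intact but cannot serve the
// few headers immediately below it, faking a taller chain than it actually
// owns. A minimal sketch of that shape (plain ints standing in for block
// hashes, not the test chain's real types):

package main

import "fmt"

func main() {
	const numMissing = 5
	const chainLen = 20 // head = block #19 stays advertised

	headers := make(map[int]bool)
	for n := 0; n < chainLen; n++ {
		headers[n] = true
	}
	// Strip the headers just below the head, mirroring the loop in the test:
	// indices chainLen-2 down to (but excluding) chainLen-numMissing.
	for i := chainLen - 2; i > chainLen-numMissing; i-- {
		delete(headers, i)
	}
	for n := 0; n < chainLen; n++ {
		if !headers[n] {
			fmt.Printf("header #%d unavailable despite advertised head #%d\n", n, chainLen-1)
		}
	}
}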
1441 // This test reproduces an issue where unexpected deliveries would
1442 // block indefinitely if they arrived at the right time.
1443 func TestDeliverHeadersHang65Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, FullSync) }
1444 func TestDeliverHeadersHang65Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, FastSync) }
1445 func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, LightSync) }
1446
1447 func TestDeliverHeadersHang66Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
1448 func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
1449 func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }
1450
1451 func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
1452 	t.Parallel()
1453
1454 	master := newTester()
1455 	defer master.terminate()
1456 	chain := testChainBase.shorten(15)
1457
1458 	for i := 0; i < 200; i++ {
1459 		tester := newTester()
1460 		tester.peerDb = master.peerDb
1461 		tester.newPeer("peer", protocol, chain)
1462
1463 		// Whenever the downloader requests headers, flood it with
1464 		// a lot of unrequested header deliveries.
1465 		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
1466 			peer:   tester.downloader.peers.peers["peer"].peer,
1467 			tester: tester,
1468 		}
1469 		if err := tester.sync("peer", nil, mode); err != nil {
1470 			t.Errorf("test %d: sync failed: %v", i, err)
1471 		}
1472 		tester.terminate()
1473 	}
1474 }
1475
1476 type floodingTestPeer struct {
1477 	peer   Peer
1478 	tester *downloadTester
1479 }
1480
1481 func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
1482 func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
1483 	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
1484 }
1485 func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
1486 	return ftp.peer.RequestBodies(hashes)
1487 }
1488 func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
1489 	return ftp.peer.RequestNodeData(hashes)
1490 }
1491
1492 func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
1493 	deliveriesDone := make(chan struct{}, 500)
1494 	for i := 0; i < cap(deliveriesDone)-1; i++ {
1495 		peer := fmt.Sprintf("fake-peer%d", i)
1496 		go func() {
1497 			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
1498 			deliveriesDone <- struct{}{}
1499 		}()
1500 	}
1501
1502 	// None of the extra deliveries should block.
1503 	timeout := time.After(60 * time.Second)
1504 	launched := false
1505 	for i := 0; i < cap(deliveriesDone); i++ {
1506 		select {
1507 		case <-deliveriesDone:
1508 			if !launched {
1509 				// Start delivering the requested headers
1510 				// after one of the flooding responses has arrived.
1511 				go func() {
1512 					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
1513 					deliveriesDone <- struct{}{}
1514 				}()
1515 				launched = true
1516 			}
1517 		case <-timeout:
1518 			panic("blocked")
1519 		}
1520 	}
1521 	return nil
1522 }
1523
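// floodingTestPeer hammers the downloader with unsolicited header packets
// and requires every DeliverHeaders call to return promptly even though the
// data was never requested. The liveness property being exercised reduces
// to this sketch: a deliver that must never block its caller (hypothetical
// channel plumbing, not the downloader's actual internals):

package main

import (
	"fmt"
	"time"
)

// deliver drops the payload when no consumer is waiting instead of
// blocking the sender forever.
func deliver(ch chan<- int, v int) {
	select {
	case ch <- v:
	default:
	}
}

func main() {
	requests := make(chan int) // unbuffered and never read: the worst case
	done := make(chan struct{}, 100)
	for i := 0; i < cap(done); i++ {
		go func(v int) {
			deliver(requests, v) // must return even with no reader
			done <- struct{}{}
		}(i)
	}
	timeout := time.After(5 * time.Second)
	for i := 0; i < cap(done); i++ {
		select {
		case <-done:
		case <-timeout:
			panic("an unsolicited delivery blocked") // mirrors the test's panic
		}
	}
	fmt.Println("all unsolicited deliveries returned promptly")
}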
1524 func TestRemoteHeaderRequestSpan(t *testing.T) {
1525 	testCases := []struct {
1526 		remoteHeight uint64
1527 		localHeight  uint64
1528 		expected     []int
1529 	}{
1530 		// Remote is way higher. We should ask for the remote head and go backwards
1531 		{1500, 1000,
1532 			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
1533 		},
1534 		{15000, 13006,
1535 			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
1536 		},
1537 		// Remote is pretty close to us. We don't have to fetch as many
1538 		{1200, 1150,
1539 			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
1540 		},
1541 		// Remote is equal to us (so on a fork with higher td)
1542 		// We should get the closest couple of ancestors
1543 		{1500, 1500,
1544 			[]int{1497, 1499},
1545 		},
1546 		// We're higher than the remote! Odd
1547 		{1000, 1500,
1548 			[]int{997, 999},
1549 		},
1550 		// Check some weird edge cases to ensure it behaves somewhat rationally
1551 		{0, 1500,
1552 			[]int{0, 2},
1553 		},
1554 		{6000000, 0,
1555 			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
1556 		},
1557 		{0, 0,
1558 			[]int{0, 2},
1559 		},
1560 	}
1561 	reqs := func(from, count, span int) []int {
1562 		var r []int
1563 		num := from
1564 		for len(r) < count {
1565 			r = append(r, num)
1566 			num += span + 1
1567 		}
1568 		return r
1569 	}
1570 	for i, tt := range testCases {
1571 		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
1572 		data := reqs(int(from), count, span)
1573
1574 		if max != uint64(data[len(data)-1]) {
1575 			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
1576 		}
1577 		failed := false
1578 		if len(data) != len(tt.expected) {
1579 			failed = true
1580 			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
1581 		} else {
1582 			for j, n := range data {
1583 				if n != tt.expected[j] {
1584 					failed = true
1585 					break
1586 				}
1587 			}
1588 		}
1589 		if failed {
1590 			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
1591 			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
1592 			t.Logf("got: %v\n", res)
1593 			t.Logf("exp: %v\n", exp)
1594 			t.Errorf("test %d: wrong values", i)
1595 		}
1596 	}
1597 }
1598
1599 // Tests that peers below a pre-configured checkpoint block are prevented from
1600 // being fast-synced from, avoiding potential cheap eclipse attacks.
1601 func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FullSync) }
1602 func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FastSync) }
1603 func TestCheckpointEnforcement65Light(t *testing.T) {
1604 	testCheckpointEnforcement(t, eth.ETH65, LightSync)
1605 }
1606
1607 func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
1608 func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
1609 func TestCheckpointEnforcement66Light(t *testing.T) {
1610 	testCheckpointEnforcement(t, eth.ETH66, LightSync)
1611 }
1612
1613 func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
1614 	t.Parallel()
1615
1616 	// Create a new tester with a particular hard-coded checkpoint block
1617 	tester := newTester()
1618 	defer tester.terminate()
1619
1620 	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
1621 	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
1622
1623 	// Attempt to sync with the peer and validate the result
1624 	tester.newPeer("peer", protocol, chain)
1625
1626 	var expect error
1627 	if mode == FastSync || mode == LightSync {
1628 		expect = errUnsyncedPeer
1629 	}
1630 	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
1631 		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
1632 	}
1633 	if mode == FastSync || mode == LightSync {
1634 		assertOwnChain(t, tester, 1)
1635 	} else {
1636 		assertOwnChain(t, tester, chain.len())
1637 	}
1638 }
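// For reference, the (from, count, span) triple returned by
// calculateRequestSpan in TestRemoteHeaderRequestSpan expands into concrete
// header numbers exactly as the test's reqs helper does: start at from and
// step by span+1, count times. A standalone sketch reproducing the first
// table entry (remote at 1500, local at 1000); the triple used here is
// inferred from the expected list (twelve entries, step 16, starting 1323):

package main

import "fmt"

// expand mirrors the reqs helper from the test above.
func expand(from, count, span int) []int {
	r := make([]int, 0, count)
	for num := from; len(r) < count; num += span + 1 {
		r = append(r, num)
	}
	return r
}

func main() {
	fmt.Println(expand(1323, 12, 15))
	// Output: [1323 1339 1355 1371 1387 1403 1419 1435 1451 1467 1483 1499]
	// i.e. twelve requests spaced 16 apart, ending at the remote head 1499.
}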