github.com/RobustRoundRobin/quorum@v20.10.0+incompatible/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	maxForkAncestry = 10000
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond

	// set immutability threshold to 10000 as well
	params.SetQuorumImmutabilityThreshold(10000)
}

// downloadTester is a test simulator for mocking out a local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
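// It seeds both the active and the ancient stores with the shared test genesis
// and backs state with an in-memory database, so every sync in these tests
// starts from the same ancestor. The tester itself is passed to New as the
// chain backend, since it implements the interfaces the downloader consumes.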
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

		// Initialize ancient store with test genesis block
		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = rawdb.NewMemoryDatabase()
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if _, ok := dl.ancientReceipts[hash]; ok {
		return true
	}
	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	header := dl.ancientHeaders[hash]
	if header != nil {
		return header
	}
	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
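// The ancient (frozen) store is consulted before the active one, matching the
// lookup order used by the other accessors above.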
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	block := dl.ancientBlocks[hash]
	if block != nil {
		return block
	}
	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if td := dl.ancientChainTd[hash]; td != nil {
		return td
	}
	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
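// The batch is pre-validated for parent linkage before anything is written,
// mimicking blockchain.InsertHeaderChain, which inserts nothing on error.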
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
				return i, errors.New("unknown parent")
			}
		}
		if blocks[i].NumberU64() <= ancientLimit {
			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]

			// Migrate from active db to ancient db
			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())

			delete(dl.ownHeaders, blocks[i].Hash())
			delete(dl.ownChainTd, blocks[i].Hash())
		} else {
			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
		}
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
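// Hashes are processed newest-first: the canonical hash list is trimmed only
// while the removed hash is the current head, and lookups are deleted from
// both the active and the ancient stores.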
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])

		delete(dl.ancientChainTd, hashes[i])
		delete(dl.ancientHeaders, hashes[i])
		delete(dl.ancientReceipts, hashes[i])
		delete(dl.ancientBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	lock          sync.RWMutex
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin, associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByHash(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin, associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByNumber(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
	return nil
}

// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.mode == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
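// The peer serves a plain extension of the locally known chain, so the local
// head is already the common ancestor.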
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					cached = receipts
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
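// Both peers serve forks that only share the testChainBase prefix, so after
// importing fork A the common ancestor for fork B lies behind the local head
// and has to be searched for.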
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
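// A fork whose common ancestor is older than the allowed window is expected to
// be rejected with errInvalidAncestor rather than imported.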
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("heavy-rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Create peers of every type
	tester.newPeer("peer 62", 62, chain)
	tester.newPeer("peer 63", 63, chain)
	tester.newPeer("peer 64", 64, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
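// The bodyFetchHook and receiptFetchHook counters below are compared against
// the number of blocks that actually carry transactions, uncles or receipts.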
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}

	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
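// Three attackers are exercised below: one feeding junk during header fetching,
// one during block import, and one withholding blocks after the fast sync pivot;
// each should leave the head within the expected rollback bound.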
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}

	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
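// syncInitHook is used to pause the downloader right after initialisation so
// the reported progress can be sampled at well-defined points of the sync.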
func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chain.len()/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len()/2 - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len() - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	p.KnownStates, p.PulledStates = 0, 0
	want.KnownStates, want.PulledStates = 0, 0
	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}

// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversion).
func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA)
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chainA.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainA.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainB.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	brokenChain := chain.shorten(chain.len())
	missing := brokenChain.len() / 2
	delete(brokenChain.headerm, brokenChain.chain[missing])
	delete(brokenChain.blockm, brokenChain.chain[missing])
	delete(brokenChain.receiptm, brokenChain.chain[missing])
	tester.newPeer("faulty", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the same
	// after a failure
	tester.newPeer("valid", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(chain.len() - 1),
		HighestBlock: uint64(chain.len() - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	brokenChain := chain.shorten(chain.len())
	numMissing := 5
	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
		delete(brokenChain.headerm, brokenChain.chain[i])
	}
	tester.newPeer("attack", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(chain.len() - numMissing)
	tester.newPeer("valid", protocol, validChain)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(validChain.len() - 1),
	})

	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(validChain.len() - 1),
		HighestBlock: uint64(validChain.len() - 1),
	})
}

// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		protocol int
		syncMode SyncMode
	}{
		{62, FullSync},
		{63, FullSync},
		{63, FastSync},
		{64, FullSync},
		{64, FastSync},
		{64, LightSync},
	}
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
			t.Parallel()
			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
		})
	}
}

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}

type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

// RequestHeadersByNumber floods the downloader with unsolicited header
// deliveries from fake peers and only then forwards the real request to the
// wrapped peer, so every delivery must complete without blocking.
func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to make sure it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	// reqs expands a (from, count, span) triple into the concrete header numbers
	// that would be requested from the peer (see the illustrative sketch after
	// testCheckpointEnforcement below).
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement62(t *testing.T)      { testCheckpointEnforcement(t, 62, FullSync) }
func TestCheckpointEnforcement63Full(t *testing.T)  { testCheckpointEnforcement(t, 63, FullSync) }
func TestCheckpointEnforcement63Fast(t *testing.T)  { testCheckpointEnforcement(t, 63, FastSync) }
func TestCheckpointEnforcement64Full(t *testing.T)  { testCheckpointEnforcement(t, 64, FullSync) }
func TestCheckpointEnforcement64Fast(t *testing.T)  { testCheckpointEnforcement(t, 64, FastSync) }
func TestCheckpointEnforcement64Light(t *testing.T) { testCheckpointEnforcement(t, 64, LightSync) }

func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	// Create a new tester with a particular hard-coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain)

	var expect error
	if mode == FastSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); err != expect {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == FastSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, chain.len())
	}
}
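
// Illustrative sketch, not part of the original test file: it re-derives the
// first TestRemoteHeaderRequestSpan case (remote height 1500, local height
// 1000) from a (from, count, span) triple of (1323, 12, 15), showing how the
// expected header numbers 1323, 1339, ..., 1499 arise. The function name
// exampleHeaderRequestSpan and the concrete triple are assumptions made for
// this sketch; only calculateRequestSpan and the expected values in the test
// above come from the original code.
func exampleHeaderRequestSpan() []uint64 {
	from, count, span := uint64(1323), 12, uint64(15)

	// Each request lands span+1 blocks after the previous one, so twelve
	// requests starting at 1323 with a span of 15 end exactly at the remote
	// head 1499 used in the test case above.
	numbers := make([]uint64, 0, count)
	for n := from; len(numbers) < count; n += span + 1 {
		numbers = append(numbers, n)
	}
	return numbers // [1323 1339 1355 ... 1499]
}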