github.com/corverroos/quorum@v21.1.0+incompatible/eth/downloader/downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	maxForkAncestry = 10000
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond

	// Set the immutability threshold to 10000 as well
	params.SetQuorumImmutabilityThreshold(10000)
}

// downloadTester is a test simulator for mocking out a local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

		// Initialize ancient store with test genesis block
		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = rawdb.NewMemoryDatabase()
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronising with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block's receipts are present in the tester's canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if _, ok := dl.ancientReceipts[hash]; ok {
		return true
	}
	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	header := dl.ancientHeaders[hash]
	if header != nil {
		return header
	}
	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	block := dl.ancientBlocks[hash]
	if block != nil {
		return block
	}
	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if td := dl.ancientChainTd[hash]; td != nil {
		return td
	}
	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
				return i, errors.New("unknown parent")
			}
		}
		if blocks[i].NumberU64() <= ancientLimit {
			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]

			// Migrate from active db to ancient db
			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())

			delete(dl.ownHeaders, blocks[i].Hash())
			delete(dl.ownChainTd, blocks[i].Hash())
		} else {
			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
		}
	}
	return len(blocks), nil
}
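// Illustrative note (not part of the original tester): InsertReceiptChain above
// splits the incoming batch on ancientLimit, mirroring the freezer behaviour of
// the real chain. As a hedged example of that split, assuming a batch of blocks
// numbered 1..8 and ancientLimit = 5, blocks 1-5 (plus their headers, receipts
// and total difficulties) would migrate into the ancient* maps and be deleted
// from the active ownHeaders/ownChainTd maps, while blocks 6-8 would stay in
// the active ownBlocks/ownReceipts maps.
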
// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])

		delete(dl.ancientChainTd, hashes[i])
		delete(dl.ancientHeaders, hashes[i])
		delete(dl.ancientReceipts, hashes[i])
		delete(dl.ancientBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	lock          sync.RWMutex
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head constructs a function to retrieve a peer's current head hash
// and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByHash(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByNumber(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
	return nil
}

// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.mode == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}
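// Illustrative note (not part of the original file): for the helper above, a
// hedged worked example of the fork arithmetic is common = 3 with
// lengths = []int{8, 10}, which expects 8 + (10 - 3) = 15 headers, blocks and
// receipts in total. The trailing "- 1" in each length check compensates for
// the genesis block being pre-seeded in both the own* and ancient* maps by
// newTester, so it would otherwise be counted twice.
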
// Tests that simple synchronisation against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) {
	testCanonicalSynchronisation(t, 64, LightSync)
}

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					cached = receipts
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("heavy-rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Create peers of every type
	tester.newPeer("peer 62", 62, chain)
	tester.newPeer("peer 63", 63, chain)
	tester.newPeer("peer 64", 64, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}
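// Illustrative note (not part of the original file): the counting loops above
// mirror the downloader's empty-block short circuit. As a hedged example, if 60
// of the non-genesis blocks in the test chain carried neither transactions nor
// uncles, a full sync would be expected to issue body requests only for the
// remaining non-empty headers, reassembling the 60 empty blocks from their
// headers alone; likewise, in fast sync receipt requests are only expected for
// blocks whose receipt list is non-empty.
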
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}

	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}

	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chain.len()/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len()/2 - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len() - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	p.KnownStates, p.PulledStates = 0, 0
	want.KnownStates, want.PulledStates = 0, 0
	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}
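// Illustrative note (not part of the original file): checkProgress zeroes the
// KnownStates and PulledStates fields on both the observed and expected values
// before comparing. A hedged reading is that the state-trie download counters
// depend on goroutine scheduling and are therefore nondeterministic across
// runs, so only the block-oriented fields (StartingBlock, CurrentBlock,
// HighestBlock) are asserted exactly.
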
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA)
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chainA.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainA.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainB.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	brokenChain := chain.shorten(chain.len())
	missing := brokenChain.len() / 2
	delete(brokenChain.headerm, brokenChain.chain[missing])
	delete(brokenChain.blockm, brokenChain.chain[missing])
	delete(brokenChain.receiptm, brokenChain.chain[missing])
	tester.newPeer("faulty", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the
	// same after a failure
	tester.newPeer("valid", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(chain.len() - 1),
		HighestBlock: uint64(chain.len() - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	brokenChain := chain.shorten(chain.len())
	numMissing := 5
	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
		delete(brokenChain.headerm, brokenChain.chain[i])
	}
	tester.newPeer("attack", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(chain.len() - numMissing)
	tester.newPeer("valid", protocol, validChain)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(validChain.len() - 1),
	})

	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(validChain.len() - 1),
		HighestBlock: uint64(validChain.len() - 1),
	})
}
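
// Note on the attacker chain built above: with numMissing = 5 the loop deletes
// only the headers just below the head (indices len-2 down to len-4) and keeps
// the head itself, so the "attack" peer can still advertise the full chain
// height while being unable to serve it. Once the attack is detected, the
// follow-up sync against validChain (numMissing blocks shorter) is expected to
// report the reduced HighestBlock.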

// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		protocol int
		syncMode SyncMode
	}{
		{62, FullSync},
		{63, FullSync},
		{63, FastSync},
		{64, FullSync},
		{64, FastSync},
		{64, LightSync},
	}
	for _, tc := range testCases {
		tc := tc // capture the range variable for the parallel subtest below
		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
			t.Parallel()
			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
		})
	}
}

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}

type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to make sure it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement62(t *testing.T)      { testCheckpointEnforcement(t, 62, FullSync) }
func TestCheckpointEnforcement63Full(t *testing.T)  { testCheckpointEnforcement(t, 63, FullSync) }
func TestCheckpointEnforcement63Fast(t *testing.T)  { testCheckpointEnforcement(t, 63, FastSync) }
func TestCheckpointEnforcement64Full(t *testing.T)  { testCheckpointEnforcement(t, 64, FullSync) }
func TestCheckpointEnforcement64Fast(t *testing.T)  { testCheckpointEnforcement(t, 64, FastSync) }
func TestCheckpointEnforcement64Light(t *testing.T) { testCheckpointEnforcement(t, 64, LightSync) }

func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	// Create a new tester with a particular hard coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain)

	var expect error
	if mode == FastSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); err != expect {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == FastSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, chain.len())
	}
}
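
// Rough summary of the expectations above, assuming the checkpoint check
// applies only to fast and light sync, as the branches suggest:
//
//	FastSync / LightSync -> errUnsyncedPeer, local chain stays at the genesis block
//	FullSync             -> no error, the entire below-checkpoint chain is imported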