github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/eth/downloader/downloader_test.go

// Copyright 2018 The go-ethereum Authors
// Copyright 2019 The go-aigar Authors
// This file is part of the go-aigar library.
//
// The go-aigar library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-aigar library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
    "errors"
    "fmt"
    "math/big"
    "strings"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/AigarNetwork/aigar"
    "github.com/AigarNetwork/aigar/common"
    "github.com/AigarNetwork/aigar/core/rawdb"
    "github.com/AigarNetwork/aigar/core/types"
    "github.com/AigarNetwork/aigar/ethdb"
    "github.com/AigarNetwork/aigar/event"
    "github.com/AigarNetwork/aigar/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
    maxForkAncestry = 10000
    blockCacheItems = 1024
    fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
    downloader *Downloader

    genesis *types.Block   // Genesis blocks used by the tester and peers
    stateDb ethdb.Database // Database used by the tester for syncing from peers
    peerDb  ethdb.Database // Database of the peers containing all data
    peers   map[string]*downloadTesterPeer

    ownHashes   []common.Hash                  // Hash chain belonging to the tester
    ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
    ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
    ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
    ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

    ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
    ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
    ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
    ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

    lock sync.RWMutex
}
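// Added commentary (not in the original file): the tester mirrors the real
// chain's split between an active database and an ancient store by keeping two
// parallel map sets — "own*" for recent data and "ancient*" for data that
// InsertReceiptChain migrates below the ancient limit. The accessors below
// consult the ancient maps first, then fall back to the active ones.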
// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
    tester := &downloadTester{
        genesis:     testGenesis,
        peerDb:      testDB,
        peers:       make(map[string]*downloadTesterPeer),
        ownHashes:   []common.Hash{testGenesis.Hash()},
        ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
        ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
        ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
        ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

        // Initialize ancient store with test genesis block
        ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
        ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
        ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
        ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
    }
    tester.stateDb = rawdb.NewMemoryDatabase()
    tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

    tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
    return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
    dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
    dl.lock.RLock()
    hash := dl.peers[id].chain.headBlock().Hash()
    // If no particular TD was requested, load from the peer's blockchain
    if td == nil {
        td = dl.peers[id].chain.td(hash)
    }
    dl.lock.RUnlock()

    // Synchronise with the chosen peer and ensure proper cleanup afterwards
    err := dl.downloader.synchronise(id, hash, td, mode)
    select {
    case <-dl.downloader.cancelCh:
        // Ok, downloader fully cancelled after sync cycle
    default:
        // Downloader is still accepting packets, can block a peer up
        panic("downloader active post sync cycle") // panic will be caught by tester
    }
    return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
    return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
    return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block's receipts are present in the tester's canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if _, ok := dl.ancientReceipts[hash]; ok {
        return true
    }
    _, ok := dl.ownReceipts[hash]
    return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    header := dl.ancientHeaders[hash]
    if header != nil {
        return header
    }
    return dl.ownHeaders[hash]
}
// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    block := dl.ancientBlocks[hash]
    if block != nil {
        return block
    }
    return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    for i := len(dl.ownHashes) - 1; i >= 0; i-- {
        if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
            return header
        }
        if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
            return header
        }
    }
    return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    for i := len(dl.ownHashes) - 1; i >= 0; i-- {
        if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
            // Ancient blocks count as the head whether or not their state is
            // still available, so they are returned unconditionally.
            if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
                return block
            }
            return block
        }
        if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
            if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
                return block
            }
        }
    }
    return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    for i := len(dl.ownHashes) - 1; i >= 0; i-- {
        if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
            return block
        }
        if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
            return block
        }
    }
    return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
    // For now only check that the state trie is correct
    if block := dl.GetBlockByHash(hash); block != nil {
        _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
        return err
    }
    return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if td := dl.ancientChainTd[hash]; td != nil {
        return td
    }
    return dl.ownChainTd[hash]
}
// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
    if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
        return 0, errors.New("unknown parent")
    }
    for i := 1; i < len(headers); i++ {
        if headers[i].ParentHash != headers[i-1].Hash() {
            return i, errors.New("unknown parent")
        }
    }
    // Do a full insert if pre-checks passed
    for i, header := range headers {
        if _, ok := dl.ownHeaders[header.Hash()]; ok {
            continue
        }
        if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
            return i, errors.New("unknown parent")
        }
        dl.ownHashes = append(dl.ownHashes, header.Hash())
        dl.ownHeaders[header.Hash()] = header
        dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
    }
    return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    for i, block := range blocks {
        if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
            return i, errors.New("unknown parent")
        } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
            return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
        }
        if _, ok := dl.ownHeaders[block.Hash()]; !ok {
            dl.ownHashes = append(dl.ownHashes, block.Hash())
            dl.ownHeaders[block.Hash()] = block.Header()
        }
        dl.ownBlocks[block.Hash()] = block
        dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
        dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
        dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
    }
    return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    for i := 0; i < len(blocks) && i < len(receipts); i++ {
        if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
            return i, errors.New("unknown owner")
        }
        if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
            if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
                return i, errors.New("unknown parent")
            }
        }
        if blocks[i].NumberU64() <= ancientLimit {
            dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
            dl.ancientReceipts[blocks[i].Hash()] = receipts[i]

            // Migrate from active db to ancient db
            dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
            dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())

            delete(dl.ownHeaders, blocks[i].Hash())
            delete(dl.ownChainTd, blocks[i].Hash())
        } else {
            dl.ownBlocks[blocks[i].Hash()] = blocks[i]
            dl.ownReceipts[blocks[i].Hash()] = receipts[i]
        }
    }
    return len(blocks), nil
}
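// Added commentary (not in the original file): the branch above is the
// tester's whole freezer model — a block whose number is at or below
// ancientLimit moves into the ancient maps (and its header/TD leave the
// active maps), anything newer stays in ownBlocks/ownReceipts. E.g. with
// ancientLimit = 100, block 100 is frozen while block 101 remains active.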
// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    for i := len(hashes) - 1; i >= 0; i-- {
        if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
            dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
        }
        delete(dl.ownChainTd, hashes[i])
        delete(dl.ownHeaders, hashes[i])
        delete(dl.ownReceipts, hashes[i])
        delete(dl.ownBlocks, hashes[i])

        delete(dl.ancientChainTd, hashes[i])
        delete(dl.ancientHeaders, hashes[i])
        delete(dl.ancientReceipts, hashes[i])
        delete(dl.ancientBlocks, hashes[i])
    }
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
    dl.peers[id] = peer
    return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    delete(dl.peers, id)
    dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
    dl            *downloadTester
    id            string
    lock          sync.RWMutex
    chain         *testChain
    missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head constructs a function to retrieve a peer's current head hash
// and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
    b := dlp.chain.headBlock()
    return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
    if reverse {
        panic("reverse header requests not supported")
    }

    result := dlp.chain.headersByHash(origin, amount, skip)
    go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
    return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
    if reverse {
        panic("reverse header requests not supported")
    }

    result := dlp.chain.headersByNumber(origin, amount, skip)
    go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
    return nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
    txs, uncles := dlp.chain.bodies(hashes)
    go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
    return nil
}
// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
    receipts := dlp.chain.receipts(hashes)
    go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
    return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
    dlp.dl.lock.RLock()
    defer dlp.dl.lock.RUnlock()

    results := make([][]byte, 0, len(hashes))
    for _, hash := range hashes {
        if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
            if !dlp.missingStates[hash] {
                results = append(results, data)
            }
        }
    }
    go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
    return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
    // Mark this method as a helper to report errors at callsite, not in here
    t.Helper()

    assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
    // Mark this method as a helper to report errors at callsite, not in here
    t.Helper()

    // Initialize the counters for the first fork
    headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

    // Update the counters for each subsequent fork
    for _, length := range lengths[1:] {
        headers += length - common
        blocks += length - common
        receipts += length - common
    }
    if tester.downloader.mode == LightSync {
        blocks, receipts = 1, 1
    }
    if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
        t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
    }
    if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
        t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
    }
    if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
        t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
    }
}
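// Added commentary: the -1 in each assertion above discounts the genesis
// block, which newTester seeds into both the active and the ancient maps,
// so summing the two map lengths counts it twice.

// Illustrative sketch (added for exposition, not part of the original suite):
// the skeleton that virtually every synchronisation test below follows —
// build a tester, register a peer serving a shortened chain, sync, and assert
// the resulting chain length. Deliberately unexported and never invoked.
func exampleSyncSkeleton(t *testing.T, protocol int, mode SyncMode) {
    tester := newTester()
    defer tester.terminate()

    // Keep the served chain smaller than the block cache so a single sync
    // cycle can fetch it without throttling.
    chain := testChainBase.shorten(blockCacheItems - 15)
    tester.newPeer("peer", protocol, chain)

    // A nil TD makes the tester use the peer's own head difficulty.
    if err := tester.sync("peer", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}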
// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a small enough block chain to download
    chain := testChainBase.shorten(blockCacheItems - 15)
    tester.newPeer("peer", protocol, chain)

    // Synchronise with the peer and make sure all relevant data was retrieved
    if err := tester.sync("peer", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()
    tester := newTester()
    defer tester.terminate()

    // Create a long block chain to download and the tester
    targetBlocks := testChainBase.len() - 1
    tester.newPeer("peer", protocol, testChainBase)

    // Wrap the importer to allow stepping
    blocked, proceed := uint32(0), make(chan struct{})
    tester.downloader.chainInsertHook = func(results []*fetchResult) {
        atomic.StoreUint32(&blocked, uint32(len(results)))
        <-proceed
    }
    // Start a synchronisation concurrently
    errc := make(chan error)
    go func() {
        errc <- tester.sync("peer", nil, mode)
    }()
    // Iteratively take some blocks, always checking the retrieval count
    for {
        // Check the retrieval count synchronously (! reason for this ugly block)
        tester.lock.RLock()
        retrieved := len(tester.ownBlocks)
        tester.lock.RUnlock()
        if retrieved >= targetBlocks+1 {
            break
        }
        // Wait a bit for sync to throttle itself
        var cached, frozen int
        for start := time.Now(); time.Since(start) < 3*time.Second; {
            time.Sleep(25 * time.Millisecond)

            tester.lock.Lock()
            tester.downloader.queue.lock.Lock()
            cached = len(tester.downloader.queue.blockDonePool)
            if mode == FastSync {
                if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
                    cached = receipts
                }
            }
            frozen = int(atomic.LoadUint32(&blocked))
            retrieved = len(tester.ownBlocks)
            tester.downloader.queue.lock.Unlock()
            tester.lock.Unlock()

            if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
                break
            }
        }
        // Make sure we filled up the cache, then exhaust it
        time.Sleep(25 * time.Millisecond) // give it a chance to screw up

        tester.lock.RLock()
        retrieved = len(tester.ownBlocks)
        tester.lock.RUnlock()
        if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
            t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
        }
        // Permit the blocked blocks to import
        if atomic.LoadUint32(&blocked) > 0 {
            atomic.StoreUint32(&blocked, uint32(0))
            proceed <- struct{}{}
        }
    }
    // Check that we haven't pulled more blocks than available
    assertOwnChain(t, tester, targetBlocks+1)
    if err := <-errc; err != nil {
        t.Fatalf("block synchronization failed: %v", err)
    }
}
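// Added commentary: the cached/frozen arithmetic above accepts two steady
// states — a completely full block cache, or every remaining block accounted
// for between imported, cached and blocked — each optionally reduced by
// reorgProtHeaderDelay, presumably because the downloader withholds the last
// few headers near the head as reorg protection and they never reach the cache.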
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
    chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
    tester.newPeer("fork A", protocol, chainA)
    tester.newPeer("fork B", protocol, chainB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("fork A", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    // Synchronise with the second peer and make sure that fork is pulled too
    if err := tester.sync("fork B", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
    chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
    tester.newPeer("light", protocol, chainA)
    tester.newPeer("heavy", protocol, chainB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("light", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    // Synchronise with the second peer and make sure that fork is pulled too
    if err := tester.sync("heavy", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chainA := testChainForkLightA
    chainB := testChainForkLightB
    tester.newPeer("original", protocol, chainA)
    tester.newPeer("rewriter", protocol, chainB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("original", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    // Synchronise with the second peer and ensure that the fork is rejected for being too old
    if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
        t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
    }
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a long enough forked chain
    chainA := testChainForkLightA
    chainB := testChainForkHeavy
    tester.newPeer("original", protocol, chainA)
    tester.newPeer("heavy-rewriter", protocol, chainB)

    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("original", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chainA.len())

    // Synchronise with the second peer and ensure that the fork is rejected for being too old
    if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
        t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
    }
}
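// Added commentary: the errInvalidAncestor rejections above work because the
// unshortened fork chains place the common ancestor deeper behind the local
// head than the fork-ancestry window (maxForkAncestry, lowered to 10000 in
// init), so the downloader refuses the rewrite as a long-dead chain.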
// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Check that neither block headers nor bodies are accepted
    if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
    }
    if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
    }
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Check that neither block headers, bodies nor receipts are accepted
    if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
    }
    if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
    }
    if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
    }
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chain := testChainBase.shorten(MaxHeaderFetch)
    tester.newPeer("peer", protocol, chain)

    // Make sure canceling works with a pristine downloader
    tester.downloader.Cancel()
    if !tester.downloader.queue.Idle() {
        t.Errorf("download queue not idle")
    }
    // Synchronise with the peer, but cancel afterwards
    if err := tester.sync("peer", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    tester.downloader.Cancel()
    if !tester.downloader.queue.Idle() {
        t.Errorf("download queue not idle")
    }
}
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create various peers with various parts of the chain
    targetPeers := 8
    chain := testChainBase.shorten(targetPeers * 100)

    for i := 0; i < targetPeers; i++ {
        id := fmt.Sprintf("peer #%d", i)
        tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
    }
    if err := tester.sync("peer #0", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a small enough block chain to download
    chain := testChainBase.shorten(blockCacheItems - 15)

    // Create peers of every type
    tester.newPeer("peer 62", 62, chain)
    tester.newPeer("peer 63", 63, chain)
    tester.newPeer("peer 64", 64, chain)

    // Synchronise with the requested peer and make sure all blocks were retrieved
    if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())

    // Check that no peers have been dropped off
    for _, version := range []int{62, 63, 64} {
        peer := fmt.Sprintf("peer %d", version)
        if _, ok := tester.peers[peer]; !ok {
            t.Errorf("%s dropped", peer)
        }
    }
}
// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a block chain to download
    chain := testChainBase
    tester.newPeer("peer", protocol, chain)

    // Instrument the downloader to signal body requests
    bodiesHave, receiptsHave := int32(0), int32(0)
    tester.downloader.bodyFetchHook = func(headers []*types.Header) {
        atomic.AddInt32(&bodiesHave, int32(len(headers)))
    }
    tester.downloader.receiptFetchHook = func(headers []*types.Header) {
        atomic.AddInt32(&receiptsHave, int32(len(headers)))
    }
    // Synchronise with the peer and make sure all blocks were retrieved
    if err := tester.sync("peer", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())

    // Validate the number of block bodies that should have been requested
    bodiesNeeded, receiptsNeeded := 0, 0
    for _, block := range chain.blockm {
        if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
            bodiesNeeded++
        }
    }
    for _, receipt := range chain.receiptm {
        if mode == FastSync && len(receipt) > 0 {
            receiptsNeeded++
        }
    }
    if int(bodiesHave) != bodiesNeeded {
        t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
    }
    if int(receiptsHave) != receiptsNeeded {
        t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
    }
}
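// Added commentary: the short circuit being counted above is possible because
// an empty block is fully determined by its header — an empty transaction
// list and uncle set hash to exactly the roots committed in the header — so
// the block can be reassembled without any body request.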
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chain := testChainBase.shorten(blockCacheItems - 15)
    brokenChain := chain.shorten(chain.len())
    delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
    tester.newPeer("attack", protocol, brokenChain)

    if err := tester.sync("attack", nil, mode); err == nil {
        t.Fatalf("succeeded attacker synchronisation")
    }
    // Synchronise with the valid peer and make sure sync succeeds
    tester.newPeer("valid", protocol, chain)
    if err := tester.sync("valid", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chain := testChainBase.shorten(blockCacheItems - 15)

    // Attempt a full sync with an attacker feeding shifted headers
    brokenChain := chain.shorten(chain.len())
    delete(brokenChain.headerm, brokenChain.chain[1])
    delete(brokenChain.blockm, brokenChain.chain[1])
    delete(brokenChain.receiptm, brokenChain.chain[1])
    tester.newPeer("attack", protocol, brokenChain)
    if err := tester.sync("attack", nil, mode); err == nil {
        t.Fatalf("succeeded attacker synchronisation")
    }

    // Synchronise with the valid peer and make sure sync succeeds
    tester.newPeer("valid", protocol, chain)
    if err := tester.sync("valid", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    assertOwnChain(t, tester, chain.len())
}
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    // Create a small enough block chain to download
    targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
    chain := testChainBase.shorten(targetBlocks)

    // Attempt to sync with an attacker that feeds junk during the fast sync phase.
    // This should result in the last fsHeaderSafetyNet headers being rolled back.
    missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
    fastAttackChain := chain.shorten(chain.len())
    delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
    tester.newPeer("fast-attack", protocol, fastAttackChain)

    if err := tester.sync("fast-attack", nil, mode); err == nil {
        t.Fatalf("succeeded fast attacker synchronisation")
    }
    if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
        t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
    }

    // Attempt to sync with an attacker that feeds junk during the block import phase.
    // This should result in both the last fsHeaderSafetyNet number of headers being
    // rolled back, and also the pivot point being reverted to a non-block status.
    missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
    blockAttackChain := chain.shorten(chain.len())
    delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
    delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
    tester.newPeer("block-attack", protocol, blockAttackChain)

    if err := tester.sync("block-attack", nil, mode); err == nil {
        t.Fatalf("succeeded block attacker synchronisation")
    }
    if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
        t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
    }
    if mode == FastSync {
        if head := tester.CurrentBlock().NumberU64(); head != 0 {
            t.Errorf("fast sync pivot block #%d not rolled back", head)
        }
    }

    // Attempt to sync with an attacker that withholds promised blocks after the
    // fast sync pivot point. This could be an attempt to leave the node with a bad
    // but already imported pivot block.
    withholdAttackChain := chain.shorten(chain.len())
    tester.newPeer("withhold-attack", protocol, withholdAttackChain)
    tester.downloader.syncInitHook = func(uint64, uint64) {
        for i := missing; i < withholdAttackChain.len(); i++ {
            delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
        }
        tester.downloader.syncInitHook = nil
    }
    if err := tester.sync("withhold-attack", nil, mode); err == nil {
        t.Fatalf("succeeded withholding attacker synchronisation")
    }
    if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
        t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
    }
    if mode == FastSync {
        if head := tester.CurrentBlock().NumberU64(); head != 0 {
            t.Errorf("fast sync pivot block #%d not rolled back", head)
        }
    }

    // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
    // should also disable fast syncing for this process, verify that we did a fresh full
    // sync. Note, we can't assert anything about the receipts since we won't purge the
    // database of them, hence we can't use assertOwnChain.
    tester.newPeer("valid", protocol, chain)
    if err := tester.sync("valid", nil, mode); err != nil {
        t.Fatalf("failed to synchronise blocks: %v", err)
    }
    if hs := len(tester.ownHeaders); hs != chain.len() {
        t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
    }
    if mode != LightSync {
        if bs := len(tester.ownBlocks); bs != chain.len() {
            t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
        }
    }
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()

    chain := testChainBase.shorten(1)
    tester.newPeer("attack", protocol, chain)
    if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
        t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
    }
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
    t.Parallel()

    // Define the disconnection requirement for individual hash fetch errors
    tests := []struct {
        result error
        drop   bool
    }{
        {nil, false},                        // Sync succeeded, all is well
        {errBusy, false},                    // Sync is already in progress, no problem
        {errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
        {errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
        {errStallingPeer, true},             // Peer was detected to be stalling, drop it
        {errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
        {errNoPeers, false},                 // No peers to download from, soft race, no issue
        {errTimeout, true},                  // No hashes received in due time, drop the peer
        {errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
        {errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
        {errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
        {errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
        {errInvalidBody, false},             // A bad peer was detected, but not the sync origin
        {errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
        {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
    }
    // Run the tests and check disconnection status
    tester := newTester()
    defer tester.terminate()
    chain := testChainBase.shorten(1)

    for i, tt := range tests {
        // Register a new peer and ensure its presence
        id := fmt.Sprintf("test %d", i)
        if err := tester.newPeer(id, protocol, chain); err != nil {
            t.Fatalf("test %d: failed to register new peer: %v", i, err)
        }
        if _, ok := tester.peers[id]; !ok {
            t.Fatalf("test %d: registered peer not found", i)
        }
        // Simulate a synchronisation and check the required result
        tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

        tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
        if _, ok := tester.peers[id]; !ok != tt.drop {
            t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
        }
    }
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()
    chain := testChainBase.shorten(blockCacheItems - 15)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})
    progress := make(chan struct{})

    tester.downloader.syncInitHook = func(origin, latest uint64) {
        starting <- struct{}{}
        <-progress
    }
    checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

    // Synchronise half the blocks and check initial progress
    tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
    pending := new(sync.WaitGroup)
    pending.Add(1)

    go func() {
        defer pending.Done()
        if err := tester.sync("peer-half", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
        HighestBlock: uint64(chain.len()/2 - 1),
    })
    progress <- struct{}{}
    pending.Wait()

    // Synchronise all the blocks and check continuation progress
    tester.newPeer("peer-full", protocol, chain)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("peer-full", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
        StartingBlock: uint64(chain.len()/2 - 1),
        CurrentBlock:  uint64(chain.len()/2 - 1),
        HighestBlock:  uint64(chain.len() - 1),
    })

    // Check final progress after successful sync
    progress <- struct{}{}
    pending.Wait()
    checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
        StartingBlock: uint64(chain.len()/2 - 1),
        CurrentBlock:  uint64(chain.len() - 1),
        HighestBlock:  uint64(chain.len() - 1),
    })
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
    // Mark this method as a helper to report errors at callsite, not in here
    t.Helper()

    p := d.Progress()
    p.KnownStates, p.PulledStates = 0, 0
    want.KnownStates, want.PulledStates = 0, 0
    if p != want {
        t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
    }
}
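// Added commentary: checkProgress zeroes the KnownStates/PulledStates fields
// on both sides before comparing, presumably because the number of state trie
// entries downloaded during fast sync is timing-dependent and would make the
// block-number assertions flaky.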
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()
    chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
    chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})
    progress := make(chan struct{})

    tester.downloader.syncInitHook = func(origin, latest uint64) {
        starting <- struct{}{}
        <-progress
    }
    checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

    // Synchronise with one of the forks and check progress
    tester.newPeer("fork A", protocol, chainA)
    pending := new(sync.WaitGroup)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("fork A", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting

    checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
        HighestBlock: uint64(chainA.len() - 1),
    })
    progress <- struct{}{}
    pending.Wait()

    // Simulate a successful sync above the fork
    tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

    // Synchronise with the second fork and check progress resets
    tester.newPeer("fork B", protocol, chainB)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("fork B", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
        StartingBlock: uint64(testChainBase.len()) - 1,
        CurrentBlock:  uint64(chainA.len() - 1),
        HighestBlock:  uint64(chainB.len() - 1),
    })

    // Check final progress after successful sync
    progress <- struct{}{}
    pending.Wait()
    checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
        StartingBlock: uint64(testChainBase.len()) - 1,
        CurrentBlock:  uint64(chainB.len() - 1),
        HighestBlock:  uint64(chainB.len() - 1),
    })
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
    t.Parallel()

    tester := newTester()
    defer tester.terminate()
    chain := testChainBase.shorten(blockCacheItems - 15)

    // Set a sync init hook to catch progress changes
    starting := make(chan struct{})
    progress := make(chan struct{})

    tester.downloader.syncInitHook = func(origin, latest uint64) {
        starting <- struct{}{}
        <-progress
    }
    checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

    // Attempt a full sync with a faulty peer
    brokenChain := chain.shorten(chain.len())
    missing := brokenChain.len() / 2
    delete(brokenChain.headerm, brokenChain.chain[missing])
    delete(brokenChain.blockm, brokenChain.chain[missing])
    delete(brokenChain.receiptm, brokenChain.chain[missing])
    tester.newPeer("faulty", protocol, brokenChain)

    pending := new(sync.WaitGroup)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("faulty", nil, mode); err == nil {
            panic("succeeded faulty synchronisation")
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
        HighestBlock: uint64(brokenChain.len() - 1),
    })
    progress <- struct{}{}
    pending.Wait()
    afterFailedSync := tester.downloader.Progress()

    // Synchronise with a good peer and check that the progress origin remains the
    // same after the failure
    tester.newPeer("valid", protocol, chain)
    pending.Add(1)
    go func() {
        defer pending.Done()
        if err := tester.sync("valid", nil, mode); err != nil {
            panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
        }
    }()
    <-starting
    checkProgress(t, tester.downloader, "completing", afterFailedSync)

    // Check final progress after successful sync
    progress <- struct{}{}
    pending.Wait()
    checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
        CurrentBlock: uint64(chain.len() - 1),
        HighestBlock: uint64(chain.len() - 1),
    })
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	brokenChain := chain.shorten(chain.len())
	numMissing := 5
	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
		delete(brokenChain.headerm, brokenChain.chain[i])
	}
	tester.newPeer("attack", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been
	// reduced to the true value.
	validChain := chain.shorten(chain.len() - numMissing)
	tester.newPeer("valid", protocol, validChain)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(validChain.len() - 1),
	})

	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(validChain.len() - 1),
		HighestBlock: uint64(validChain.len() - 1),
	})
}
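// The faulty and attacking peers above are built by deleting entries from a
// copied test chain. A sketch of that setup as a reusable helper, assuming the
// peers' chain type is the testChain struct defined in this package's test
// helpers (the type name is an assumption; the fields are exactly the ones
// used above). withholdHeader is a hypothetical name.
func withholdHeader(c *testChain, index int) {
	// Dropping the header and any associated data at the given index leaves
	// the peer unable to answer for that block, aborting the sync mid-way.
	hash := c.chain[index]
	delete(c.headerm, hash)
	delete(c.blockm, hash)
	delete(c.receiptm, hash)
}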
// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		protocol int
		syncMode SyncMode
	}{
		{62, FullSync},
		{63, FullSync},
		{63, FastSync},
		{64, FullSync},
		{64, FastSync},
		{64, LightSync},
	}
	for _, tc := range testCases {
		tc := tc // capture the range variable: the parallel subtest runs after the loop advances
		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
			t.Parallel()
			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
		})
	}
}

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}

// floodingTestPeer wraps a regular test peer, passing most requests straight
// through, but swamping the downloader with unrequested header deliveries
// whenever headers are requested by number.
type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}
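// The property the flooder exercises is that DeliverHeaders calls from peers
// the downloader never asked must not block forever. A minimal standalone
// sketch of that check, using only the downloader API already exercised above;
// floodOnce is an illustrative name, not part of this package.
func floodOnce(d *Downloader) error {
	errc := make(chan error, 1)
	go func() {
		// An unsolicited delivery should be consumed or rejected promptly.
		errc <- d.DeliverHeaders("fake-peer", []*types.Header{{}})
	}()
	select {
	case err := <-errc:
		return err // returned quickly: either accepted or refused, both are fine
	case <-time.After(time.Second):
		return errors.New("unsolicited header delivery blocked")
	}
}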
func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many headers.
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td).
		// We should get the closest couple of ancestors.
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd.
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to make sure it behaves somewhat rationally.
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: wrong length, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement62(t *testing.T)      { testCheckpointEnforcement(t, 62, FullSync) }
func TestCheckpointEnforcement63Full(t *testing.T)  { testCheckpointEnforcement(t, 63, FullSync) }
func TestCheckpointEnforcement63Fast(t *testing.T)  { testCheckpointEnforcement(t, 63, FastSync) }
func TestCheckpointEnforcement64Full(t *testing.T)  { testCheckpointEnforcement(t, 64, FullSync) }
func TestCheckpointEnforcement64Fast(t *testing.T)  { testCheckpointEnforcement(t, 64, FastSync) }
func TestCheckpointEnforcement64Light(t *testing.T) { testCheckpointEnforcement(t, 64, LightSync) }

func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	// Create a new tester with a particular hard-coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain)

	var expect error
	if mode == FastSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); err != expect {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == FastSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, chain.len())
	}
}
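// A worked example of the span arithmetic verified in TestRemoteHeaderRequestSpan
// above: for a remote at height 1500 and a local head at 1000, the downloader
// schedules twelve sparse header requests spaced 16 blocks apart (span+1),
// ending at 1499, just below the reported remote height. spanExample is an
// illustrative name, not part of this package.
func spanExample() []int {
	from, count, span, _ := calculateRequestSpan(1500, 1000)
	heights := make([]int, 0, count)
	for num := int(from); len(heights) < count; num += span + 1 {
		heights = append(heights, num)
	}
	return heights // []int{1323, 1339, ..., 1499} per the first table entry above
}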