github.com/ebceco/ebc@v1.8.19-0.20190309150932-8cb0b9e06484/eth/downloader/downloader_test.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package downloader 18 19 import ( 20 "errors" 21 "fmt" 22 "math/big" 23 "strings" 24 "sync" 25 "sync/atomic" 26 "testing" 27 "time" 28 29 ethereum "github.com/ebceco/ebc" 30 "github.com/ebceco/ebc/common" 31 "github.com/ebceco/ebc/core/types" 32 "github.com/ebceco/ebc/ethdb" 33 "github.com/ebceco/ebc/event" 34 "github.com/ebceco/ebc/trie" 35 ) 36 37 // Reduce some of the parameters to make the tester faster. 38 func init() { 39 MaxForkAncestry = uint64(10000) 40 blockCacheItems = 1024 41 fsHeaderContCheck = 500 * time.Millisecond 42 } 43 44 // downloadTester is a test simulator for mocking out local block chain. 45 type downloadTester struct { 46 downloader *Downloader 47 48 genesis *types.Block // Genesis blocks used by the tester and peers 49 stateDb ethdb.Database // Database used by the tester for syncing from peers 50 peerDb ethdb.Database // Database of the peers containing all data 51 peers map[string]*downloadTesterPeer 52 53 ownHashes []common.Hash // Hash chain belonging to the tester 54 ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester 55 ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester 56 ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester 57 ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain 58 59 lock sync.RWMutex 60 } 61 62 // newTester creates a new downloader test mocker. 63 func newTester() *downloadTester { 64 tester := &downloadTester{ 65 genesis: testGenesis, 66 peerDb: testDB, 67 peers: make(map[string]*downloadTesterPeer), 68 ownHashes: []common.Hash{testGenesis.Hash()}, 69 ownHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()}, 70 ownBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis}, 71 ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil}, 72 ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()}, 73 } 74 tester.stateDb = ethdb.NewMemDatabase() 75 tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00}) 76 tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer) 77 return tester 78 } 79 80 // terminate aborts any operations on the embedded downloader and releases all 81 // held resources. 82 func (dl *downloadTester) terminate() { 83 dl.downloader.Terminate() 84 } 85 86 // sync starts synchronizing with a remote peer, blocking until it completes. 
87 func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { 88 dl.lock.RLock() 89 hash := dl.peers[id].chain.headBlock().Hash() 90 // If no particular TD was requested, load from the peer's blockchain 91 if td == nil { 92 td = dl.peers[id].chain.td(hash) 93 } 94 dl.lock.RUnlock() 95 96 // Synchronise with the chosen peer and ensure proper cleanup afterwards 97 err := dl.downloader.synchronise(id, hash, td, mode) 98 select { 99 case <-dl.downloader.cancelCh: 100 // Ok, downloader fully cancelled after sync cycle 101 default: 102 // Downloader is still accepting packets, can block a peer up 103 panic("downloader active post sync cycle") // panic will be caught by tester 104 } 105 return err 106 } 107 108 // HasHeader checks if a header is present in the testers canonical chain. 109 func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool { 110 return dl.GetHeaderByHash(hash) != nil 111 } 112 113 // HasBlock checks if a block is present in the testers canonical chain. 114 func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool { 115 return dl.GetBlockByHash(hash) != nil 116 } 117 118 // HasFastBlock checks if a block is present in the testers canonical chain. 119 func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool { 120 dl.lock.RLock() 121 defer dl.lock.RUnlock() 122 123 _, ok := dl.ownReceipts[hash] 124 return ok 125 } 126 127 // GetHeader retrieves a header from the testers canonical chain. 128 func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header { 129 dl.lock.RLock() 130 defer dl.lock.RUnlock() 131 132 return dl.ownHeaders[hash] 133 } 134 135 // GetBlock retrieves a block from the testers canonical chain. 136 func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block { 137 dl.lock.RLock() 138 defer dl.lock.RUnlock() 139 140 return dl.ownBlocks[hash] 141 } 142 143 // CurrentHeader retrieves the current head header from the canonical chain. 144 func (dl *downloadTester) CurrentHeader() *types.Header { 145 dl.lock.RLock() 146 defer dl.lock.RUnlock() 147 148 for i := len(dl.ownHashes) - 1; i >= 0; i-- { 149 if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil { 150 return header 151 } 152 } 153 return dl.genesis.Header() 154 } 155 156 // CurrentBlock retrieves the current head block from the canonical chain. 157 func (dl *downloadTester) CurrentBlock() *types.Block { 158 dl.lock.RLock() 159 defer dl.lock.RUnlock() 160 161 for i := len(dl.ownHashes) - 1; i >= 0; i-- { 162 if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { 163 if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { 164 return block 165 } 166 } 167 } 168 return dl.genesis 169 } 170 171 // CurrentFastBlock retrieves the current head fast-sync block from the canonical chain. 172 func (dl *downloadTester) CurrentFastBlock() *types.Block { 173 dl.lock.RLock() 174 defer dl.lock.RUnlock() 175 176 for i := len(dl.ownHashes) - 1; i >= 0; i-- { 177 if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { 178 return block 179 } 180 } 181 return dl.genesis 182 } 183 184 // FastSyncCommitHead manually sets the head block to a given hash. 
185 func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error { 186 // For now only check that the state trie is correct 187 if block := dl.GetBlockByHash(hash); block != nil { 188 _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0) 189 return err 190 } 191 return fmt.Errorf("non existent block: %x", hash[:4]) 192 } 193 194 // GetTd retrieves the block's total difficulty from the canonical chain. 195 func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int { 196 dl.lock.RLock() 197 defer dl.lock.RUnlock() 198 199 return dl.ownChainTd[hash] 200 } 201 202 // InsertHeaderChain injects a new batch of headers into the simulated chain. 203 func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) { 204 dl.lock.Lock() 205 defer dl.lock.Unlock() 206 207 // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors 208 if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok { 209 return 0, errors.New("unknown parent") 210 } 211 for i := 1; i < len(headers); i++ { 212 if headers[i].ParentHash != headers[i-1].Hash() { 213 return i, errors.New("unknown parent") 214 } 215 } 216 // Do a full insert if pre-checks passed 217 for i, header := range headers { 218 if _, ok := dl.ownHeaders[header.Hash()]; ok { 219 continue 220 } 221 if _, ok := dl.ownHeaders[header.ParentHash]; !ok { 222 return i, errors.New("unknown parent") 223 } 224 dl.ownHashes = append(dl.ownHashes, header.Hash()) 225 dl.ownHeaders[header.Hash()] = header 226 dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty) 227 } 228 return len(headers), nil 229 } 230 231 // InsertChain injects a new batch of blocks into the simulated chain. 232 func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) { 233 dl.lock.Lock() 234 defer dl.lock.Unlock() 235 236 for i, block := range blocks { 237 if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok { 238 return i, errors.New("unknown parent") 239 } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil { 240 return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err) 241 } 242 if _, ok := dl.ownHeaders[block.Hash()]; !ok { 243 dl.ownHashes = append(dl.ownHashes, block.Hash()) 244 dl.ownHeaders[block.Hash()] = block.Header() 245 } 246 dl.ownBlocks[block.Hash()] = block 247 dl.ownReceipts[block.Hash()] = make(types.Receipts, 0) 248 dl.stateDb.Put(block.Root().Bytes(), []byte{0x00}) 249 dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty()) 250 } 251 return len(blocks), nil 252 } 253 254 // InsertReceiptChain injects a new batch of receipts into the simulated chain. 255 func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (i int, err error) { 256 dl.lock.Lock() 257 defer dl.lock.Unlock() 258 259 for i := 0; i < len(blocks) && i < len(receipts); i++ { 260 if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok { 261 return i, errors.New("unknown owner") 262 } 263 if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { 264 return i, errors.New("unknown parent") 265 } 266 dl.ownBlocks[blocks[i].Hash()] = blocks[i] 267 dl.ownReceipts[blocks[i].Hash()] = receipts[i] 268 } 269 return len(blocks), nil 270 } 271 272 // Rollback removes some recently added elements from the chain. 
273 func (dl *downloadTester) Rollback(hashes []common.Hash) { 274 dl.lock.Lock() 275 defer dl.lock.Unlock() 276 277 for i := len(hashes) - 1; i >= 0; i-- { 278 if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] { 279 dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1] 280 } 281 delete(dl.ownChainTd, hashes[i]) 282 delete(dl.ownHeaders, hashes[i]) 283 delete(dl.ownReceipts, hashes[i]) 284 delete(dl.ownBlocks, hashes[i]) 285 } 286 } 287 288 // newPeer registers a new block download source into the downloader. 289 func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error { 290 dl.lock.Lock() 291 defer dl.lock.Unlock() 292 293 peer := &downloadTesterPeer{dl: dl, id: id, chain: chain} 294 dl.peers[id] = peer 295 return dl.downloader.RegisterPeer(id, version, peer) 296 } 297 298 // dropPeer simulates a hard peer removal from the connection pool. 299 func (dl *downloadTester) dropPeer(id string) { 300 dl.lock.Lock() 301 defer dl.lock.Unlock() 302 303 delete(dl.peers, id) 304 dl.downloader.UnregisterPeer(id) 305 } 306 307 type downloadTesterPeer struct { 308 dl *downloadTester 309 id string 310 lock sync.RWMutex 311 chain *testChain 312 missingStates map[common.Hash]bool // State entries that fast sync should not return 313 } 314 315 // Head constructs a function to retrieve a peer's current head hash 316 // and total difficulty. 317 func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { 318 b := dlp.chain.headBlock() 319 return b.Hash(), dlp.chain.td(b.Hash()) 320 } 321 322 // RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed 323 // origin; associated with a particular peer in the download tester. The returned 324 // function can be used to retrieve batches of headers from the particular peer. 325 func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { 326 if reverse { 327 panic("reverse header requests not supported") 328 } 329 330 result := dlp.chain.headersByHash(origin, amount, skip) 331 go dlp.dl.downloader.DeliverHeaders(dlp.id, result) 332 return nil 333 } 334 335 // RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered 336 // origin; associated with a particular peer in the download tester. The returned 337 // function can be used to retrieve batches of headers from the particular peer. 338 func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { 339 if reverse { 340 panic("reverse header requests not supported") 341 } 342 343 result := dlp.chain.headersByNumber(origin, amount, skip) 344 go dlp.dl.downloader.DeliverHeaders(dlp.id, result) 345 return nil 346 } 347 348 // RequestBodies constructs a getBlockBodies method associated with a particular 349 // peer in the download tester. The returned function can be used to retrieve 350 // batches of block bodies from the particularly requested peer. 351 func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error { 352 txs, uncles := dlp.chain.bodies(hashes) 353 go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles) 354 return nil 355 } 356 357 // RequestReceipts constructs a getReceipts method associated with a particular 358 // peer in the download tester. The returned function can be used to retrieve 359 // batches of block receipts from the particularly requested peer. 
360 func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error { 361 receipts := dlp.chain.receipts(hashes) 362 go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts) 363 return nil 364 } 365 366 // RequestNodeData constructs a getNodeData method associated with a particular 367 // peer in the download tester. The returned function can be used to retrieve 368 // batches of node state data from the particularly requested peer. 369 func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error { 370 dlp.dl.lock.RLock() 371 defer dlp.dl.lock.RUnlock() 372 373 results := make([][]byte, 0, len(hashes)) 374 for _, hash := range hashes { 375 if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil { 376 if !dlp.missingStates[hash] { 377 results = append(results, data) 378 } 379 } 380 } 381 go dlp.dl.downloader.DeliverNodeData(dlp.id, results) 382 return nil 383 } 384 385 // assertOwnChain checks if the local chain contains the correct number of items 386 // of the various chain components. 387 func assertOwnChain(t *testing.T, tester *downloadTester, length int) { 388 // Mark this method as a helper to report errors at callsite, not in here 389 t.Helper() 390 391 assertOwnForkedChain(t, tester, 1, []int{length}) 392 } 393 394 // assertOwnForkedChain checks if the local forked chain contains the correct 395 // number of items of the various chain components. 396 func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) { 397 // Mark this method as a helper to report errors at callsite, not in here 398 t.Helper() 399 400 // Initialize the counters for the first fork 401 headers, blocks, receipts := lengths[0], lengths[0], lengths[0] 402 403 // Update the counters for each subsequent fork 404 for _, length := range lengths[1:] { 405 headers += length - common 406 blocks += length - common 407 receipts += length - common 408 } 409 if tester.downloader.mode == LightSync { 410 blocks, receipts = 1, 1 411 } 412 if hs := len(tester.ownHeaders); hs != headers { 413 t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) 414 } 415 if bs := len(tester.ownBlocks); bs != blocks { 416 t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) 417 } 418 if rs := len(tester.ownReceipts); rs != receipts { 419 t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) 420 } 421 } 422 423 // Tests that simple synchronization against a canonical chain works correctly. 424 // In this test common ancestor lookup should be short circuited and not require 425 // binary searching. 
426 func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) } 427 func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) } 428 func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) } 429 func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) } 430 func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) } 431 func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) } 432 433 func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { 434 t.Parallel() 435 436 tester := newTester() 437 defer tester.terminate() 438 439 // Create a small enough block chain to download 440 chain := testChainBase.shorten(blockCacheItems - 15) 441 tester.newPeer("peer", protocol, chain) 442 443 // Synchronise with the peer and make sure all relevant data was retrieved 444 if err := tester.sync("peer", nil, mode); err != nil { 445 t.Fatalf("failed to synchronise blocks: %v", err) 446 } 447 assertOwnChain(t, tester, chain.len()) 448 } 449 450 // Tests that if a large batch of blocks are being downloaded, it is throttled 451 // until the cached blocks are retrieved. 452 func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) } 453 func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) } 454 func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) } 455 func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) } 456 func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) } 457 458 func testThrottling(t *testing.T, protocol int, mode SyncMode) { 459 t.Parallel() 460 tester := newTester() 461 defer tester.terminate() 462 463 // Create a long block chain to download and the tester 464 targetBlocks := testChainBase.len() - 1 465 tester.newPeer("peer", protocol, testChainBase) 466 467 // Wrap the importer to allow stepping 468 blocked, proceed := uint32(0), make(chan struct{}) 469 tester.downloader.chainInsertHook = func(results []*fetchResult) { 470 atomic.StoreUint32(&blocked, uint32(len(results))) 471 <-proceed 472 } 473 // Start a synchronisation concurrently 474 errc := make(chan error) 475 go func() { 476 errc <- tester.sync("peer", nil, mode) 477 }() 478 // Iteratively take some blocks, always checking the retrieval count 479 for { 480 // Check the retrieval count synchronously (! 
reason for this ugly block) 481 tester.lock.RLock() 482 retrieved := len(tester.ownBlocks) 483 tester.lock.RUnlock() 484 if retrieved >= targetBlocks+1 { 485 break 486 } 487 // Wait a bit for sync to throttle itself 488 var cached, frozen int 489 for start := time.Now(); time.Since(start) < 3*time.Second; { 490 time.Sleep(25 * time.Millisecond) 491 492 tester.lock.Lock() 493 tester.downloader.queue.lock.Lock() 494 cached = len(tester.downloader.queue.blockDonePool) 495 if mode == FastSync { 496 if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached { 497 cached = receipts 498 } 499 } 500 frozen = int(atomic.LoadUint32(&blocked)) 501 retrieved = len(tester.ownBlocks) 502 tester.downloader.queue.lock.Unlock() 503 tester.lock.Unlock() 504 505 if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay { 506 break 507 } 508 } 509 // Make sure we filled up the cache, then exhaust it 510 time.Sleep(25 * time.Millisecond) // give it a chance to screw up 511 512 tester.lock.RLock() 513 retrieved = len(tester.ownBlocks) 514 tester.lock.RUnlock() 515 if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay { 516 t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1) 517 } 518 // Permit the blocked blocks to import 519 if atomic.LoadUint32(&blocked) > 0 { 520 atomic.StoreUint32(&blocked, uint32(0)) 521 proceed <- struct{}{} 522 } 523 } 524 // Check that we haven't pulled more blocks than available 525 assertOwnChain(t, tester, targetBlocks+1) 526 if err := <-errc; err != nil { 527 t.Fatalf("block synchronization failed: %v", err) 528 } 529 } 530 531 // Tests that simple synchronization against a forked chain works correctly. In 532 // this test common ancestor lookup should *not* be short circuited, and a full 533 // binary search should be executed. 
534 func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
535 func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
536 func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
537 func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
538 func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
539 func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
540
541 func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
542 t.Parallel()
543
544 tester := newTester()
545 defer tester.terminate()
546
547 chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
548 chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
549 tester.newPeer("fork A", protocol, chainA)
550 tester.newPeer("fork B", protocol, chainB)
551
552 // Synchronise with the peer and make sure all blocks were retrieved
553 if err := tester.sync("fork A", nil, mode); err != nil {
554 t.Fatalf("failed to synchronise blocks: %v", err)
555 }
556 assertOwnChain(t, tester, chainA.len())
557
558 // Synchronise with the second peer and make sure that fork is pulled too
559 if err := tester.sync("fork B", nil, mode); err != nil {
560 t.Fatalf("failed to synchronise blocks: %v", err)
561 }
562 assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
563 }
564
565 // Tests that synchronising against a much shorter but much heavier fork works
566 // correctly and is not dropped.
567 func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
568 func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
569 func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
570 func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
571 func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
572 func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
573
574 func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
575 t.Parallel()
576
577 tester := newTester()
578 defer tester.terminate()
579
580 chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
581 chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
582 tester.newPeer("light", protocol, chainA)
583 tester.newPeer("heavy", protocol, chainB)
584
585 // Synchronise with the peer and make sure all blocks were retrieved
586 if err := tester.sync("light", nil, mode); err != nil {
587 t.Fatalf("failed to synchronise blocks: %v", err)
588 }
589 assertOwnChain(t, tester, chainA.len())
590
591 // Synchronise with the second peer and make sure that fork is pulled too
592 if err := tester.sync("heavy", nil, mode); err != nil {
593 t.Fatalf("failed to synchronise blocks: %v", err)
594 }
595 assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
596 }
597
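// The helper below is an illustrative sketch added for clarity; it is not used by the tests and
// the name expectedForkedItems is hypothetical. It spells out the arithmetic that
// assertOwnForkedChain applies to the fork tests above: the first fork is counted in full, while
// every further fork only contributes the blocks beyond the shared prefix. For testForkedSync,
// where both forks extend testChainBase by 80 blocks, that works out to
// testChainBase.len() + 160 headers in total.
func expectedForkedItems(common int, lengths []int) int {
	total := lengths[0] // the first fork is owned in full
	for _, length := range lengths[1:] {
		total += length - common // later forks only add their non-shared suffix
	}
	return total
}

598 // Tests that chain forks are contained within a certain interval of the current
599 // chain head, ensuring that malicious peers cannot waste resources by feeding
600 // long dead chains.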
601 func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
602 func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
603 func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
604 func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
605 func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
606 func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
607
608 func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
609 t.Parallel()
610
611 tester := newTester()
612 defer tester.terminate()
613
614 chainA := testChainForkLightA
615 chainB := testChainForkLightB
616 tester.newPeer("original", protocol, chainA)
617 tester.newPeer("rewriter", protocol, chainB)
618
619 // Synchronise with the peer and make sure all blocks were retrieved
620 if err := tester.sync("original", nil, mode); err != nil {
621 t.Fatalf("failed to synchronise blocks: %v", err)
622 }
623 assertOwnChain(t, tester, chainA.len())
624
625 // Synchronise with the second peer and ensure that the fork is rejected for being too old
626 if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
627 t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
628 }
629 }
630
631 // Tests that chain forks are contained within a certain interval of the current
632 // chain head for short but heavy forks too. These are a bit special because they
633 // take different ancestor lookup paths.
634 func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
635 func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
636 func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
637 func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
638 func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
639 func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
640
641 func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
642 t.Parallel()
643
644 tester := newTester()
645 defer tester.terminate()
646
647 // Create a long enough forked chain
648 chainA := testChainForkLightA
649 chainB := testChainForkHeavy
650 tester.newPeer("original", protocol, chainA)
651 tester.newPeer("heavy-rewriter", protocol, chainB)
652
653 // Synchronise with the peer and make sure all blocks were retrieved
654 if err := tester.sync("original", nil, mode); err != nil {
655 t.Fatalf("failed to synchronise blocks: %v", err)
656 }
657 assertOwnChain(t, tester, chainA.len())
658
659 // Synchronise with the second peer and ensure that the fork is rejected for being too old
660 if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
661 t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
662 }
663 }
664
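// Illustrative sketch, added for clarity: ancestorWithinBound is a hypothetical helper and not
// part of the downloader. The two bounded-fork tests above rely on the fork point of the
// rewriter's chain lying more than MaxForkAncestry blocks (reduced to 10000 in init above)
// behind the already synced head, so the ancestor lookup is expected to fail with
// errInvalidAncestor. Roughly, the acceptance window looks like this:
func ancestorWithinBound(localHeight, ancestorHeight uint64) bool {
	if localHeight < MaxForkAncestry {
		return true // the entire local chain is still inside the reorg window
	}
	return ancestorHeight > localHeight-MaxForkAncestry
}

665 // Tests that an inactive downloader will not accept incoming block headers and
666 // bodies.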
667 func TestInactiveDownloader62(t *testing.T) { 668 t.Parallel() 669 670 tester := newTester() 671 defer tester.terminate() 672 673 // Check that neither block headers nor bodies are accepted 674 if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { 675 t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) 676 } 677 if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive { 678 t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) 679 } 680 } 681 682 // Tests that an inactive downloader will not accept incoming block headers, 683 // bodies and receipts. 684 func TestInactiveDownloader63(t *testing.T) { 685 t.Parallel() 686 687 tester := newTester() 688 defer tester.terminate() 689 690 // Check that neither block headers nor bodies are accepted 691 if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { 692 t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) 693 } 694 if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive { 695 t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) 696 } 697 if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive { 698 t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) 699 } 700 } 701 702 // Tests that a canceled download wipes all previously accumulated state. 703 func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) } 704 func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) } 705 func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) } 706 func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) } 707 func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) } 708 func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) } 709 710 func testCancel(t *testing.T, protocol int, mode SyncMode) { 711 t.Parallel() 712 713 tester := newTester() 714 defer tester.terminate() 715 716 chain := testChainBase.shorten(MaxHeaderFetch) 717 tester.newPeer("peer", protocol, chain) 718 719 // Make sure canceling works with a pristine downloader 720 tester.downloader.Cancel() 721 if !tester.downloader.queue.Idle() { 722 t.Errorf("download queue not idle") 723 } 724 // Synchronise with the peer, but cancel afterwards 725 if err := tester.sync("peer", nil, mode); err != nil { 726 t.Fatalf("failed to synchronise blocks: %v", err) 727 } 728 tester.downloader.Cancel() 729 if !tester.downloader.queue.Idle() { 730 t.Errorf("download queue not idle") 731 } 732 } 733 734 // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
735 func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) } 736 func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) } 737 func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) } 738 func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) } 739 func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) } 740 func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) } 741 742 func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { 743 t.Parallel() 744 745 tester := newTester() 746 defer tester.terminate() 747 748 // Create various peers with various parts of the chain 749 targetPeers := 8 750 chain := testChainBase.shorten(targetPeers * 100) 751 752 for i := 0; i < targetPeers; i++ { 753 id := fmt.Sprintf("peer #%d", i) 754 tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1))) 755 } 756 if err := tester.sync("peer #0", nil, mode); err != nil { 757 t.Fatalf("failed to synchronise blocks: %v", err) 758 } 759 assertOwnChain(t, tester, chain.len()) 760 } 761 762 // Tests that synchronisations behave well in multi-version protocol environments 763 // and not wreak havoc on other nodes in the network. 764 func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) } 765 func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) } 766 func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) } 767 func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) } 768 func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) } 769 func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) } 770 771 func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { 772 t.Parallel() 773 774 tester := newTester() 775 defer tester.terminate() 776 777 // Create a small enough block chain to download 778 chain := testChainBase.shorten(blockCacheItems - 15) 779 780 // Create peers of every type 781 tester.newPeer("peer 62", 62, chain) 782 tester.newPeer("peer 63", 63, chain) 783 tester.newPeer("peer 64", 64, chain) 784 785 // Synchronise with the requested peer and make sure all blocks were retrieved 786 if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { 787 t.Fatalf("failed to synchronise blocks: %v", err) 788 } 789 assertOwnChain(t, tester, chain.len()) 790 791 // Check that no peers have been dropped off 792 for _, version := range []int{62, 63, 64} { 793 peer := fmt.Sprintf("peer %d", version) 794 if _, ok := tester.peers[peer]; !ok { 795 t.Errorf("%s dropped", peer) 796 } 797 } 798 } 799 800 // Tests that if a block is empty (e.g. header only), no body request should be 801 // made, and instead the header should be assembled into a whole block in itself. 
802 func TestEmptyShortCircuit62(t *testing.T) { testEmptyShortCircuit(t, 62, FullSync) } 803 func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) } 804 func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) } 805 func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) } 806 func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) } 807 func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) } 808 809 func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { 810 t.Parallel() 811 812 tester := newTester() 813 defer tester.terminate() 814 815 // Create a block chain to download 816 chain := testChainBase 817 tester.newPeer("peer", protocol, chain) 818 819 // Instrument the downloader to signal body requests 820 bodiesHave, receiptsHave := int32(0), int32(0) 821 tester.downloader.bodyFetchHook = func(headers []*types.Header) { 822 atomic.AddInt32(&bodiesHave, int32(len(headers))) 823 } 824 tester.downloader.receiptFetchHook = func(headers []*types.Header) { 825 atomic.AddInt32(&receiptsHave, int32(len(headers))) 826 } 827 // Synchronise with the peer and make sure all blocks were retrieved 828 if err := tester.sync("peer", nil, mode); err != nil { 829 t.Fatalf("failed to synchronise blocks: %v", err) 830 } 831 assertOwnChain(t, tester, chain.len()) 832 833 // Validate the number of block bodies that should have been requested 834 bodiesNeeded, receiptsNeeded := 0, 0 835 for _, block := range chain.blockm { 836 if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { 837 bodiesNeeded++ 838 } 839 } 840 for _, receipt := range chain.receiptm { 841 if mode == FastSync && len(receipt) > 0 { 842 receiptsNeeded++ 843 } 844 } 845 if int(bodiesHave) != bodiesNeeded { 846 t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded) 847 } 848 if int(receiptsHave) != receiptsNeeded { 849 t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded) 850 } 851 } 852 853 // Tests that headers are enqueued continuously, preventing malicious nodes from 854 // stalling the downloader by feeding gapped header chains. 
855 func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62, FullSync) } 856 func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) } 857 func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) } 858 func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) } 859 func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) } 860 func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) } 861 862 func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { 863 t.Parallel() 864 865 tester := newTester() 866 defer tester.terminate() 867 868 chain := testChainBase.shorten(blockCacheItems - 15) 869 brokenChain := chain.shorten(chain.len()) 870 delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2]) 871 tester.newPeer("attack", protocol, brokenChain) 872 873 if err := tester.sync("attack", nil, mode); err == nil { 874 t.Fatalf("succeeded attacker synchronisation") 875 } 876 // Synchronise with the valid peer and make sure sync succeeds 877 tester.newPeer("valid", protocol, chain) 878 if err := tester.sync("valid", nil, mode); err != nil { 879 t.Fatalf("failed to synchronise blocks: %v", err) 880 } 881 assertOwnChain(t, tester, chain.len()) 882 } 883 884 // Tests that if requested headers are shifted (i.e. first is missing), the queue 885 // detects the invalid numbering. 886 func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62, FullSync) } 887 func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) } 888 func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) } 889 func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) } 890 func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) } 891 func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) } 892 893 func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { 894 t.Parallel() 895 896 tester := newTester() 897 defer tester.terminate() 898 899 chain := testChainBase.shorten(blockCacheItems - 15) 900 901 // Attempt a full sync with an attacker feeding shifted headers 902 brokenChain := chain.shorten(chain.len()) 903 delete(brokenChain.headerm, brokenChain.chain[1]) 904 delete(brokenChain.blockm, brokenChain.chain[1]) 905 delete(brokenChain.receiptm, brokenChain.chain[1]) 906 tester.newPeer("attack", protocol, brokenChain) 907 if err := tester.sync("attack", nil, mode); err == nil { 908 t.Fatalf("succeeded attacker synchronisation") 909 } 910 911 // Synchronise with the valid peer and make sure sync succeeds 912 tester.newPeer("valid", protocol, chain) 913 if err := tester.sync("valid", nil, mode); err != nil { 914 t.Fatalf("failed to synchronise blocks: %v", err) 915 } 916 assertOwnChain(t, tester, chain.len()) 917 } 918 919 // Tests that upon detecting an invalid header, the recent ones are rolled back 920 // for various failure scenarios. Afterwards a full sync is attempted to make 921 // sure no state was corrupted. 
922 func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) } 923 func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) } 924 func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) } 925 926 func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { 927 t.Parallel() 928 929 tester := newTester() 930 defer tester.terminate() 931 932 // Create a small enough block chain to download 933 targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks 934 chain := testChainBase.shorten(targetBlocks) 935 936 // Attempt to sync with an attacker that feeds junk during the fast sync phase. 937 // This should result in the last fsHeaderSafetyNet headers being rolled back. 938 missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 939 fastAttackChain := chain.shorten(chain.len()) 940 delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) 941 tester.newPeer("fast-attack", protocol, fastAttackChain) 942 943 if err := tester.sync("fast-attack", nil, mode); err == nil { 944 t.Fatalf("succeeded fast attacker synchronisation") 945 } 946 if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { 947 t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) 948 } 949 950 // Attempt to sync with an attacker that feeds junk during the block import phase. 951 // This should result in both the last fsHeaderSafetyNet number of headers being 952 // rolled back, and also the pivot point being reverted to a non-block status. 953 missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 954 blockAttackChain := chain.shorten(chain.len()) 955 delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in 956 delete(blockAttackChain.headerm, blockAttackChain.chain[missing]) 957 tester.newPeer("block-attack", protocol, blockAttackChain) 958 959 if err := tester.sync("block-attack", nil, mode); err == nil { 960 t.Fatalf("succeeded block attacker synchronisation") 961 } 962 if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { 963 t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) 964 } 965 if mode == FastSync { 966 if head := tester.CurrentBlock().NumberU64(); head != 0 { 967 t.Errorf("fast sync pivot block #%d not rolled back", head) 968 } 969 } 970 971 // Attempt to sync with an attacker that withholds promised blocks after the 972 // fast sync pivot point. This could be a trial to leave the node with a bad 973 // but already imported pivot block. 
974 withholdAttackChain := chain.shorten(chain.len())
975 tester.newPeer("withhold-attack", protocol, withholdAttackChain)
976 tester.downloader.syncInitHook = func(uint64, uint64) {
977 for i := missing; i < withholdAttackChain.len(); i++ {
978 delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
979 }
980 tester.downloader.syncInitHook = nil
981 }
982 if err := tester.sync("withhold-attack", nil, mode); err == nil {
983 t.Fatalf("succeeded withholding attacker synchronisation")
984 }
985 if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
986 t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
987 }
988 if mode == FastSync {
989 if head := tester.CurrentBlock().NumberU64(); head != 0 {
990 t.Errorf("fast sync pivot block #%d not rolled back", head)
991 }
992 }
993
994 // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
995 // should also disable fast syncing for this process, verify that we did a fresh full
996 // sync. Note, we can't assert anything about the receipts since we won't purge the
997 // database of them, hence we can't use assertOwnChain.
998 tester.newPeer("valid", protocol, chain)
999 if err := tester.sync("valid", nil, mode); err != nil {
1000 t.Fatalf("failed to synchronise blocks: %v", err)
1001 }
1002 if hs := len(tester.ownHeaders); hs != chain.len() {
1003 t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
1004 }
1005 if mode != LightSync {
1006 if bs := len(tester.ownBlocks); bs != chain.len() {
1007 t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
1008 }
1009 }
1010 }
1011
1012 // Tests that a peer advertising a high TD doesn't get to stall the downloader
1013 // afterwards by not sending any useful hashes.
1014 func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
1015 func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
1016 func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
1017 func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
1018 func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }
1019 func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
1020
1021 func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
1022 t.Parallel()
1023
1024 tester := newTester()
1025 defer tester.terminate()
1026
1027 chain := testChainBase.shorten(1)
1028 tester.newPeer("attack", protocol, chain)
1029 if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
1030 t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
1031 }
1032 }
1033
1034 // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
1035 func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) } 1036 func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) } 1037 func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) } 1038 1039 func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { 1040 t.Parallel() 1041 1042 // Define the disconnection requirement for individual hash fetch errors 1043 tests := []struct { 1044 result error 1045 drop bool 1046 }{ 1047 {nil, false}, // Sync succeeded, all is well 1048 {errBusy, false}, // Sync is already in progress, no problem 1049 {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop 1050 {errBadPeer, true}, // Peer was deemed bad for some reason, drop it 1051 {errStallingPeer, true}, // Peer was detected to be stalling, drop it 1052 {errNoPeers, false}, // No peers to download from, soft race, no issue 1053 {errTimeout, true}, // No hashes received in due time, drop the peer 1054 {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end 1055 {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser 1056 {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter 1057 {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop 1058 {errInvalidBlock, false}, // A bad peer was detected, but not the sync origin 1059 {errInvalidBody, false}, // A bad peer was detected, but not the sync origin 1060 {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin 1061 {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop 1062 {errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop 1063 {errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop 1064 {errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop 1065 {errCancelHeaderProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop 1066 {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop 1067 } 1068 // Run the tests and check disconnection status 1069 tester := newTester() 1070 defer tester.terminate() 1071 chain := testChainBase.shorten(1) 1072 1073 for i, tt := range tests { 1074 // Register a new peer and ensure it's presence 1075 id := fmt.Sprintf("test %d", i) 1076 if err := tester.newPeer(id, protocol, chain); err != nil { 1077 t.Fatalf("test %d: failed to register new peer: %v", i, err) 1078 } 1079 if _, ok := tester.peers[id]; !ok { 1080 t.Fatalf("test %d: registered peer not found", i) 1081 } 1082 // Simulate a synchronisation and check the required result 1083 tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } 1084 1085 tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync) 1086 if _, ok := tester.peers[id]; !ok != tt.drop { 1087 t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) 1088 } 1089 } 1090 } 1091 1092 // Tests that synchronisation progress (origin block number, current block number 1093 // and highest block number) is tracked and updated correctly. 
1094 func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
1095 func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
1096 func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
1097 func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
1098 func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }
1099 func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
1100
1101 func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
1102 t.Parallel()
1103
1104 tester := newTester()
1105 defer tester.terminate()
1106 chain := testChainBase.shorten(blockCacheItems - 15)
1107
1108 // Set a sync init hook to catch progress changes
1109 starting := make(chan struct{})
1110 progress := make(chan struct{})
1111
1112 tester.downloader.syncInitHook = func(origin, latest uint64) {
1113 starting <- struct{}{}
1114 <-progress
1115 }
1116 checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
1117
1118 // Synchronise half the blocks and check initial progress
1119 tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
1120 pending := new(sync.WaitGroup)
1121 pending.Add(1)
1122
1123 go func() {
1124 defer pending.Done()
1125 if err := tester.sync("peer-half", nil, mode); err != nil {
1126 panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
1127 }
1128 }()
1129 <-starting
1130 checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
1131 HighestBlock: uint64(chain.len()/2 - 1),
1132 })
1133 progress <- struct{}{}
1134 pending.Wait()
1135
1136 // Synchronise all the blocks and check continuation progress
1137 tester.newPeer("peer-full", protocol, chain)
1138 pending.Add(1)
1139 go func() {
1140 defer pending.Done()
1141 if err := tester.sync("peer-full", nil, mode); err != nil {
1142 panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
1143 }
1144 }()
1145 <-starting
1146 checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
1147 StartingBlock: uint64(chain.len()/2 - 1),
1148 CurrentBlock: uint64(chain.len()/2 - 1),
1149 HighestBlock: uint64(chain.len() - 1),
1150 })
1151
1152 // Check final progress after successful sync
1153 progress <- struct{}{}
1154 pending.Wait()
1155 checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
1156 StartingBlock: uint64(chain.len()/2 - 1),
1157 CurrentBlock: uint64(chain.len() - 1),
1158 HighestBlock: uint64(chain.len() - 1),
1159 })
1160 }
1161
1162 func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
1163 // Mark this method as a helper to report errors at callsite, not in here
1164 t.Helper()
1165
1166 p := d.Progress()
1167 p.KnownStates, p.PulledStates = 0, 0
1168 want.KnownStates, want.PulledStates = 0, 0
1169 if p != want {
1170 t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
1171 }
1172 }
1173
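// Note added for clarity: chain.len() counts the genesis block, so a test chain with
// len() == n has head block number n-1. That is why the expected progress values above and
// below use chain.len()/2 - 1 for the half chain and chain.len() - 1 for the full one. The
// helper below is a hypothetical illustration of that off-by-one and is not used by the tests.
func headBlockNumber(chainLen int) uint64 {
	// Block numbers start at 0 with the genesis block, so the head of a chain
	// holding chainLen blocks (genesis included) is block number chainLen-1.
	return uint64(chainLen - 1)
}

1174 // Tests that synchronisation progress (origin block number and highest block
1175 // number) is tracked and updated correctly in case of a fork (or manual head
1176 // reversal).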
1177 func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) } 1178 func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) } 1179 func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) } 1180 func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) } 1181 func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) } 1182 func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) } 1183 1184 func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) { 1185 t.Parallel() 1186 1187 tester := newTester() 1188 defer tester.terminate() 1189 chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch) 1190 chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch) 1191 1192 // Set a sync init hook to catch progress changes 1193 starting := make(chan struct{}) 1194 progress := make(chan struct{}) 1195 1196 tester.downloader.syncInitHook = func(origin, latest uint64) { 1197 starting <- struct{}{} 1198 <-progress 1199 } 1200 checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) 1201 1202 // Synchronise with one of the forks and check progress 1203 tester.newPeer("fork A", protocol, chainA) 1204 pending := new(sync.WaitGroup) 1205 pending.Add(1) 1206 go func() { 1207 defer pending.Done() 1208 if err := tester.sync("fork A", nil, mode); err != nil { 1209 panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) 1210 } 1211 }() 1212 <-starting 1213 1214 checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ 1215 HighestBlock: uint64(chainA.len() - 1), 1216 }) 1217 progress <- struct{}{} 1218 pending.Wait() 1219 1220 // Simulate a successful sync above the fork 1221 tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight 1222 1223 // Synchronise with the second fork and check progress resets 1224 tester.newPeer("fork B", protocol, chainB) 1225 pending.Add(1) 1226 go func() { 1227 defer pending.Done() 1228 if err := tester.sync("fork B", nil, mode); err != nil { 1229 panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) 1230 } 1231 }() 1232 <-starting 1233 checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ 1234 StartingBlock: uint64(testChainBase.len()) - 1, 1235 CurrentBlock: uint64(chainA.len() - 1), 1236 HighestBlock: uint64(chainB.len() - 1), 1237 }) 1238 1239 // Check final progress after successful sync 1240 progress <- struct{}{} 1241 pending.Wait() 1242 checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ 1243 StartingBlock: uint64(testChainBase.len()) - 1, 1244 CurrentBlock: uint64(chainB.len() - 1), 1245 HighestBlock: uint64(chainB.len() - 1), 1246 }) 1247 } 1248 1249 // Tests that if synchronisation is aborted due to some failure, then the progress 1250 // origin is not updated in the next sync cycle, as it should be considered the 1251 // continuation of the previous sync and not a new instance. 
1252 func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) }
1253 func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) }
1254 func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) }
1255 func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
1256 func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }
1257 func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
1258
1259 func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
1260 t.Parallel()
1261
1262 tester := newTester()
1263 defer tester.terminate()
1264 chain := testChainBase.shorten(blockCacheItems - 15)
1265
1266 // Set a sync init hook to catch progress changes
1267 starting := make(chan struct{})
1268 progress := make(chan struct{})
1269
1270 tester.downloader.syncInitHook = func(origin, latest uint64) {
1271 starting <- struct{}{}
1272 <-progress
1273 }
1274 checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
1275
1276 // Attempt a full sync with a faulty peer
1277 brokenChain := chain.shorten(chain.len())
1278 missing := brokenChain.len() / 2
1279 delete(brokenChain.headerm, brokenChain.chain[missing])
1280 delete(brokenChain.blockm, brokenChain.chain[missing])
1281 delete(brokenChain.receiptm, brokenChain.chain[missing])
1282 tester.newPeer("faulty", protocol, brokenChain)
1283
1284 pending := new(sync.WaitGroup)
1285 pending.Add(1)
1286 go func() {
1287 defer pending.Done()
1288 if err := tester.sync("faulty", nil, mode); err == nil {
1289 panic("succeeded faulty synchronisation")
1290 }
1291 }()
1292 <-starting
1293 checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
1294 HighestBlock: uint64(brokenChain.len() - 1),
1295 })
1296 progress <- struct{}{}
1297 pending.Wait()
1298 afterFailedSync := tester.downloader.Progress()
1299
1300 // Synchronise with a good peer and check that the progress origin remains the same
1301 // after a failure
1302 tester.newPeer("valid", protocol, chain)
1303 pending.Add(1)
1304 go func() {
1305 defer pending.Done()
1306 if err := tester.sync("valid", nil, mode); err != nil {
1307 panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
1308 }
1309 }()
1310 <-starting
1311 checkProgress(t, tester.downloader, "completing", afterFailedSync)
1312
1313 // Check final progress after successful sync
1314 progress <- struct{}{}
1315 pending.Wait()
1316 checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
1317 CurrentBlock: uint64(chain.len() - 1),
1318 HighestBlock: uint64(chain.len() - 1),
1319 })
1320 }
1321
1322 // Tests that if an attacker fakes a chain height, after the attack is detected,
1323 // the progress height is successfully reduced at the next sync invocation.
1324 func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) } 1325 func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) } 1326 func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) } 1327 func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) } 1328 func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) } 1329 func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) } 1330 1331 func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) { 1332 t.Parallel() 1333 1334 tester := newTester() 1335 defer tester.terminate() 1336 chain := testChainBase.shorten(blockCacheItems - 15) 1337 1338 // Set a sync init hook to catch progress changes 1339 starting := make(chan struct{}) 1340 progress := make(chan struct{}) 1341 tester.downloader.syncInitHook = func(origin, latest uint64) { 1342 starting <- struct{}{} 1343 <-progress 1344 } 1345 checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) 1346 1347 // Create and sync with an attacker that promises a higher chain than available. 1348 brokenChain := chain.shorten(chain.len()) 1349 numMissing := 5 1350 for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- { 1351 delete(brokenChain.headerm, brokenChain.chain[i]) 1352 } 1353 tester.newPeer("attack", protocol, brokenChain) 1354 1355 pending := new(sync.WaitGroup) 1356 pending.Add(1) 1357 go func() { 1358 defer pending.Done() 1359 if err := tester.sync("attack", nil, mode); err == nil { 1360 panic("succeeded attacker synchronisation") 1361 } 1362 }() 1363 <-starting 1364 checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ 1365 HighestBlock: uint64(brokenChain.len() - 1), 1366 }) 1367 progress <- struct{}{} 1368 pending.Wait() 1369 afterFailedSync := tester.downloader.Progress() 1370 1371 // Synchronise with a good peer and check that the progress height has been reduced to 1372 // the true value. 1373 validChain := chain.shorten(chain.len() - numMissing) 1374 tester.newPeer("valid", protocol, validChain) 1375 pending.Add(1) 1376 1377 go func() { 1378 defer pending.Done() 1379 if err := tester.sync("valid", nil, mode); err != nil { 1380 panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) 1381 } 1382 }() 1383 <-starting 1384 checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ 1385 CurrentBlock: afterFailedSync.CurrentBlock, 1386 HighestBlock: uint64(validChain.len() - 1), 1387 }) 1388 1389 // Check final progress after successful sync. 1390 progress <- struct{}{} 1391 pending.Wait() 1392 checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ 1393 CurrentBlock: uint64(validChain.len() - 1), 1394 HighestBlock: uint64(validChain.len() - 1), 1395 }) 1396 } 1397 1398 // This test reproduces an issue where unexpected deliveries would 1399 // block indefinitely if they arrived at the right time. 
func TestDeliverHeadersHang(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		protocol int
		syncMode SyncMode
	}{
		{62, FullSync},
		{63, FullSync},
		{63, FastSync},
		{64, FullSync},
		{64, FastSync},
		{64, LightSync},
	}
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
			t.Parallel()
			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
		})
	}
}

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}

// floodingTestPeer wraps a regular test peer and floods the downloader with
// bogus header deliveries whenever headers are requested from it.
type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to make sure it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			fmt.Printf("got: %v\n", res)
			fmt.Printf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}
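// The expected slices in TestRemoteHeaderRequestSpan encode the contract of
// calculateRequestSpan only implicitly: the local reqs() closure expands a
// (from, count, span) triple by stepping in increments of span+1, and the
// reported max must equal the last requested number. As a worked example, the
// {1500, 1000} case starts at from=1323 with span=15 and makes 12 requests,
// the last one at 1323 + 11*16 = 1499. The helper below is an illustrative
// sketch only, not part of the original test suite (decodeSpanExample is a
// hypothetical name); it recovers such a triple from an expected slice, and
// re-expanding that triple with the same logic as reqs() yields the slice back.
func decodeSpanExample(expected []int) (from, count, span int) {
	from = expected[0]    // the first request is always the starting number
	count = len(expected) // one entry per requested header
	if count > 1 {
		span = expected[1] - expected[0] - 1 // reqs() advances by span+1 each step
	}
	return from, count, span
}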