// Copyright 2015 The go-ethereum Authors
// This file is part of the go-dubxcoin library.
//
// The go-dubxcoin library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-dubxcoin library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-dubxcoin library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/alexdevranger/node-1.8.27"
	"github.com/alexdevranger/node-1.8.27/common"
	"github.com/alexdevranger/node-1.8.27/core/types"
	"github.com/alexdevranger/node-1.8.27/ethdb"
	"github.com/alexdevranger/node-1.8.27/event"
	"github.com/alexdevranger/node-1.8.27/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out a local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = ethdb.NewMemDatabase()
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, 0, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}
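// Illustrative sketch, not part of the original file: a typical test in this
// package wires the harness together by creating a tester, registering a mock
// peer backed by one of the test chain fixtures, and driving a single sync
// cycle against it (the chain length of 100 below is an arbitrary example):
//
//	tester := newTester()
//	defer tester.terminate()
//
//	chain := testChainBase.shorten(100)
//	tester.newPeer("peer", 63, chain)
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}
//	assertOwnChain(t, tester, chain.len())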
// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block with its receipts is present in the tester's canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	lock          sync.RWMutex
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head retrieves a peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByHash(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	if reverse {
		panic("reverse header requests not supported")
	}

	result := dlp.chain.headersByNumber(origin, amount, skip)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
	return nil
}
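// Illustrative note, not part of the original file: the Request* methods above
// and below all follow the same pattern — the reply is assembled synchronously
// from the peer's in-memory test chain, but handed to the downloader on a
// separate goroutine so the call returns immediately, mimicking the
// asynchronous round trip of a real network request:
//
//	txs, uncles := dlp.chain.bodies(hashes)                  // build the reply locally
//	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)  // deliver it asynchronously
//	return nil                                               // the "request" itself always succeeds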
// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.mode == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}
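// Worked example, not part of the original file: with a common prefix of length
// 2 and fork lengths []int{5, 8}, assertOwnForkedChain starts its counters at 5
// and adds 8 - 2 = 6 for the second fork, so it expects 11 headers (and, outside
// of light sync, 11 blocks and 11 receipt entries), since each additional fork
// only contributes the part beyond the shared ancestor.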
// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					cached = receipts
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
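// Illustrative sketch, not part of the original file: the throttling test above
// relies on chainInsertHook to freeze imports until the test explicitly lets
// them proceed. The general instrumentation pattern is:
//
//	blocked, proceed := uint32(0), make(chan struct{})
//	tester.downloader.chainInsertHook = func(results []*fetchResult) {
//		atomic.StoreUint32(&blocked, uint32(len(results))) // record how many results are pending
//		<-proceed                                          // block the importer until released
//	}
//	// ... later, release one batch of blocked imports:
//	atomic.StoreUint32(&blocked, uint32(0))
//	proceed <- struct{}{}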
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("heavy-rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Create peers of every type
	tester.newPeer("peer 62", 62, chain)
	tester.newPeer("peer 63", 63, chain)
	tester.newPeer("peer 64", 64, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}

	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}
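// Illustrative note, not part of the original file: the attack tests above all
// construct their malicious chains the same way — take a full-length copy of the
// honest chain and punch holes in its lookup maps, so the peer advertises a
// height it can never fully serve:
//
//	brokenChain := chain.shorten(chain.len())          // full-length copy of the honest chain
//	delete(brokenChain.headerm, brokenChain.chain[1])  // withhold a header...
//	delete(brokenChain.blockm, brokenChain.chain[1])   // ...its block...
//	delete(brokenChain.receiptm, brokenChain.chain[1]) // ...and its receipts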
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}

	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}

	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}
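// Illustrative note, not part of the original file: the drop test above swaps in
// synchroniseMock so that Synchronise returns a canned error without doing any
// real downloading; the only observable side effect left is whether dropPeer was
// invoked for that error, which is exactly what each table entry asserts:
//
//	tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
//	tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
//	_, ok := tester.peers[id] // still registered? compare against tt.drop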
// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chain.len()/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len()/2 - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len() - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	p.KnownStates, p.PulledStates = 0, 0
	want.KnownStates, want.PulledStates = 0, 0
	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}
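// Illustrative sketch, not part of the original file: the progress tests above
// and below all use the same two-channel handshake with syncInitHook, so the
// test can inspect the reported progress while the sync is parked at its init
// point:
//
//	starting := make(chan struct{})
//	progress := make(chan struct{})
//	tester.downloader.syncInitHook = func(origin, latest uint64) {
//		starting <- struct{}{} // signal the test that sync reached its init point
//		<-progress             // park until the test has checked the progress report
//	}
//	// test side: <-starting, then checkProgress(...), then progress <- struct{}{}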
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversion).
func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch)
	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA)
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chainA.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainA.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainB.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	brokenChain := chain.shorten(chain.len())
	missing := brokenChain.len() / 2
	delete(brokenChain.headerm, brokenChain.chain[missing])
	delete(brokenChain.blockm, brokenChain.chain[missing])
	delete(brokenChain.receiptm, brokenChain.chain[missing])
	tester.newPeer("faulty", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the same
	// after a failure
	tester.newPeer("valid", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(chain.len() - 1),
		HighestBlock: uint64(chain.len() - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) }
func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) }
func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) }
func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) }
func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	brokenChain := chain.shorten(chain.len())
	numMissing := 5
	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
		delete(brokenChain.headerm, brokenChain.chain[i])
	}
	tester.newPeer("attack", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(chain.len() - numMissing)
	tester.newPeer("valid", protocol, validChain)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(validChain.len() - 1),
	})

	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(validChain.len() - 1),
		HighestBlock: uint64(validChain.len() - 1),
	})
}

// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		protocol int
		syncMode SyncMode
	}{
		{62, FullSync},
		{63, FullSync},
		{63, FastSync},
		{64, FullSync},
		{64, FastSync},
		{64, LightSync},
	}
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
			t.Parallel()
			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
		})
	}
}

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer: tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}

type floodingTestPeer struct {
	peer Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight uint64
		expected []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to make sure it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			fmt.Printf("got: %v\n", res)
			fmt.Printf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement62(t *testing.T) { testCheckpointEnforcement(t, 62, FullSync) }
func TestCheckpointEnforcement63Full(t *testing.T) { testCheckpointEnforcement(t, 63, FullSync) }
func TestCheckpointEnforcement63Fast(t *testing.T) { testCheckpointEnforcement(t, 63, FastSync) }
func TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) }
func TestCheckpointEnforcement64Fast(t *testing.T) { testCheckpointEnforcement(t, 64, FastSync) }
func TestCheckpointEnforcement64Light(t *testing.T) { testCheckpointEnforcement(t, 64, LightSync) }

func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	// Create a new tester with a particular hard-coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain)

	var expect error
	if mode == FastSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); err != expect {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == FastSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, chain.len())
	}
}
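
// reconstructFirstSpanCase is an illustrative sketch, not part of the original test
// suite: it spells out how the expected slices in TestRemoteHeaderRequestSpan relate
// to calculateRequestSpan's return values. The requested header numbers start at
// `from` and step by span+1 until `count` values exist (exactly what the reqs helper
// above does), and the last of them must equal `max`, one below the remote head. The
// concrete from/count/span values below are inferred from the first case's expected
// slice (remote 1500, local 1000), not read from calculateRequestSpan itself.
func reconstructFirstSpanCase() []int {
	from, count, span := 1323, 12, 15 // assumed values, inferred from {1323, 1339, ..., 1499}
	var r []int
	for num := from; len(r) < count; num += span + 1 {
		r = append(r, num) // yields 1323, 1339, ..., 1499 (step 16), ending at remote-1
	}
	return r
}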