// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/luckypickle/go-ethereum-vet/common"
	"github.com/luckypickle/go-ethereum-vet/consensus/ethash"
	"github.com/luckypickle/go-ethereum-vet/core"
	"github.com/luckypickle/go-ethereum-vet/core/types"
	"github.com/luckypickle/go-ethereum-vet/crypto"
	"github.com/luckypickle/go-ethereum-vet/ethdb"
	"github.com/luckypickle/go-ethereum-vet/event"
	"github.com/luckypickle/go-ethereum-vet/params"
	"github.com/luckypickle/go-ethereum-vet/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis block used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb := ethdb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb = ethdb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)

	return tester
}

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash: block.PrevBlock(i - 1).Hash(),
				Number:     big.NewInt(block.Number().Int64() - 1),
			})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
	}
	return hashes, headerm, blockm, receiptm
}
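// Illustrative usage sketch (added commentary, not part of the original file):
// the tests below consume makeChain by building a fixture, registering it as a
// simulated peer and then syncing against it, roughly:
//
//	tester := newTester()
//	defer tester.terminate()
//
//	hashes, headers, blocks, receipts := tester.makeChain(128, 0, tester.genesis, nil, false)
//	tester.newPeer("peer", 63, hashes, headers, blocks, receipts)
//	if err := tester.sync("peer", nil, FullSync); err != nil {
//		t.Fatalf("failed to synchronise blocks: %v", err)
//	}
//	assertOwnChain(t, tester, 128+1) // +1: the hash chain also contains the genesis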
// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if non-balanced forks were requested
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := !balanced
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}
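// Bookkeeping note (added commentary, not in the original): InsertHeaderChain
// above maintains total difficulty as a running sum over the parent link,
//
//	td[header.Hash()] = td[header.ParentHash] + header.Difficulty
//
// so, for example, three headers of difficulty 1, 2 and 3 on top of a genesis
// of difficulty 1 accumulate total difficulties of 2, 4 and 7 respectively.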
// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
			dl.peerChainTds[id][genesis] = header.Difficulty
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
			dl.peerChainTds[id][genesis] = block.Difficulty()
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
		}
	}
	return err
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	delay time.Duration
	lock  sync.RWMutex
}

// setDelay is a thread safe setter for the network delay value.
func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
	dlp.lock.Lock()
	defer dlp.lock.Unlock()

	dlp.delay = delay
}

// waitDelay is a thread safe way to sleep for the configured time.
func (dlp *downloadTesterPeer) waitDelay() {
	dlp.lock.RLock()
	delay := dlp.delay
	dlp.lock.RUnlock()

	time.Sleep(delay)
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	return dlp.dl.peerHashes[dlp.id][0], nil
}
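// Illustrative sketch (added commentary, not in the original): a laggy peer is
// registered via newSlowPeer above exactly like a regular one, just with a
// non-zero delay, e.g.
//
//	tester.newSlowPeer("slow", 63, hashes, headers, blocks, receipts, 50*time.Millisecond)
//
// which makes each Request* method below sleep (via waitDelay) before serving
// the fixture data.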
// RequestHeadersByHash retrieves a batch of headers based on a hashed origin,
// associated with a particular peer in the download tester, simulating a
// GetBlockHeaders network request.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	// Find the canonical number of the hash
	dlp.dl.lock.RLock()
	number := uint64(0)
	for num, hash := range dlp.dl.peerHashes[dlp.id] {
		if hash == origin {
			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
			break
		}
	}
	dlp.dl.lock.RUnlock()

	// Use the absolute header fetcher to satisfy the query
	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
}

// RequestHeadersByNumber retrieves a batch of headers based on a numbered
// origin, associated with a particular peer in the download tester, simulating
// a GetBlockHeaders network request.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	// Gather the next batch of headers
	hashes := dlp.dl.peerHashes[dlp.id]
	headers := dlp.dl.peerHeaders[dlp.id]
	result := make([]*types.Header, 0, amount)
	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
			result = append(result, header)
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	go func() {
		time.Sleep(time.Millisecond)
		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	}()
	return nil
}

// RequestBodies retrieves a batch of block bodies from the requested peer,
// simulating a GetBlockBodies network request in the download tester.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	blocks := dlp.dl.peerBlocks[dlp.id]

	transactions := make([][]*types.Transaction, 0, len(hashes))
	uncles := make([][]*types.Header, 0, len(hashes))

	for _, hash := range hashes {
		if block, ok := blocks[hash]; ok {
			transactions = append(transactions, block.Transactions())
			uncles = append(uncles, block.Uncles())
		}
	}
	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)

	return nil
}

// RequestReceipts retrieves a batch of block receipts from the requested peer,
// simulating a GetReceipts network request in the download tester.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	receipts := dlp.dl.peerReceipts[dlp.id]

	results := make([][]*types.Receipt, 0, len(hashes))
	for _, hash := range hashes {
		if receipt, ok := receipts[hash]; ok {
			results = append(results, receipt)
		}
	}
	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)

	return nil
}
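// Worked example (added commentary, not in the original): RequestHeadersByNumber
// above resolves block numbers against the peer's head->genesis ordered hash
// slice via
//
//	index = len(hashes) - 1 - origin - i*(skip+1)
//
// so with 5 hashes (blocks #4..#0), origin=0, amount=3 and skip=1 it visits
// indices 4, 2 and 0, returning headers #0, #2 and #4, the sparse batches the
// downloader uses when probing for a common ancestor.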
// RequestNodeData retrieves a batch of node state data from the requested peer,
// simulating a GetNodeData network request in the download tester.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.dl.peerMissingStates[dlp.id][hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)

	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks

	if receipts < 0 {
		receipts = 1
	}
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common - fsMinFullBlocks
	}
	switch tester.downloader.mode {
	case FullSync:
		receipts = 1
	case LightSync:
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
	// Verify the state trie too for fast syncs
	/*if tester.downloader.mode == FastSync {
		pivot := uint64(0)
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}*/
}
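// Worked example (added commentary, not in the original): for a balanced fork
// checked with common = 5 and lengths = []int{9, 9}, assertOwnForkedChain above
// expects
//
//	headers = 9 + (9 - 5) = 13
//	blocks  = 9 + (9 - 5) = 13
//
// i.e. the first branch counted in full plus the non-shared tail of the second;
// in full sync mode the receipt count collapses to 1 regardless.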
// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) {
	testCanonicalSynchronisation(t, 64, LightSync)
}

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheItems
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
					cached = receipts
					//}
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
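// Accounting note (added commentary, not in the original): the throttling loop
// above relies on every block being in exactly one of three places - imported
// into ownBlocks, parked in the queue's done pool, or frozen inside the blocked
// chainInsertHook - so whenever the cache isn't full it checks
//
//	retrieved + cached + frozen == targetBlocks + 1
//
// with the +1 accounting for the genesis block.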
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T)  { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T)  { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T)  { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T)  { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T)  { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T)  { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheItems - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Create peers of every type
	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Check that no peers have been dropped off
	for _, version := range []int{62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	targetBlocks := 2*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range receipts {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["attack"], hashes[missing])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding shifted headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T)  { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T)  { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be an attempt to leave the node with a bad
	// but already imported pivot block.
	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i <= len(hashes); i++ {
			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
		}
		tester.downloader.syncInitHook = nil
	}

	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last
	// rollback should also disable fast syncing for this process, verify that we
	// did a fresh full sync. Note, we can't assert anything about the receipts
	// since we won't purge the database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != len(headers) {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != len(blocks) {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
		}
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)

	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peerHashes[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T)  { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T)  { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync progress and ensure it is zero (pristine sync)
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
	}
	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) {
		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1)
	}
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) {
		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()

	// Check final progress after successful sync
	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks)
	}
}
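// checkProgressSketch is a hypothetical helper (not part of the original
// suite) sketching how the repeated exact-match assertions against
// Downloader.Progress in the progress tests above and below could be factored
// out. The range-based assertions (e.g. CurrentBlock > x) would still need
// their own checks.
func checkProgressSketch(t *testing.T, d *Downloader, stage string, start, current, highest uint64) {
	p := d.Progress()
	if p.StartingBlock != start || p.CurrentBlock != current || p.HighestBlock != highest {
		t.Fatalf("%s progress mismatch: have %v/%v/%v, want %v/%v/%v",
			stage, p.StartingBlock, p.CurrentBlock, p.HighestBlock, start, current, highest)
	}
}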
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
func TestForkedSyncProgress64Full(t *testing.T)  { testForkedSyncProgress(t, 64, FullSync) }
func TestForkedSyncProgress64Fast(t *testing.T)  { testForkedSyncProgress(t, 64, FastSync) }
func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a forked chain to simulate origin reversal
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync progress and ensure it is zero (pristine sync)
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
	}
	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
	}
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
		t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
	}
	progress <- struct{}{}
	pending.Wait()

	// Check final progress after successful sync
	if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
		t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
	}
}
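// To make the asserted numbers above concrete: makeChainFork builds two chains
// of common+fork = 3*MaxHashFetch blocks each, agreeing on the common =
// MaxHashFetch blocks closest to the genesis. Syncing fork A therefore tops
// out at HighestBlock = len(hashesA)-1 = 3*MaxHashFetch, and switching to
// fork B resets StartingBlock to the common ancestor at height MaxHashFetch,
// while CurrentBlock still sits at fork A's head until the new cycle
// overtakes it.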
// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
func TestFailedSyncProgress64Full(t *testing.T)  { testFailedSyncProgress(t, 64, FullSync) }
func TestFailedSyncProgress64Fast(t *testing.T)  { testFailedSyncProgress(t, 64, FastSync) }
func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync progress and ensure it is zero (pristine sync)
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
	}
	// Attempt a full sync with a faulty peer
	tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["faulty"], hashes[missing])
	delete(tester.peerBlocks["faulty"], hashes[missing])
	delete(tester.peerReceipts["faulty"], hashes[missing])

	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()

	// Synchronise with a good peer and check that the progress origin remains the same after a failure
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()

	// Check final progress after successful sync
	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks)
	}
}
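// Note on the assertions above: the faulty peer withholds the block at height
// targetBlocks/2, so the first cycle dies roughly halfway through. Because a
// failed cycle must not reset the origin, the retry against the valid peer is
// expected to report StartingBlock 0 again, with CurrentBlock anywhere up to
// targetBlocks/2 depending on how far the aborted cycle got.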
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
func TestFakedSyncProgress64Full(t *testing.T)  { testFakedSyncProgress(t, 64, FullSync) }
func TestFakedSyncProgress64Fast(t *testing.T)  { testFakedSyncProgress(t, 64, FastSync) }
func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small block chain
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync progress and ensure it is zero (pristine sync)
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
		t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
	}
	// Create and sync with an attacker that promises a higher chain than available
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	for i := 1; i < 3; i++ {
		delete(tester.peerHeaders["attack"], hashes[i])
		delete(tester.peerBlocks["attack"], hashes[i])
		delete(tester.peerReceipts["attack"], hashes[i])
	}

	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) {
		t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3)
	}
	progress <- struct{}{}
	pending.Wait()

	// Synchronise with a good peer and check that the progress height has been reduced to the true value
	tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
		t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()

	// Check final progress after successful sync
	if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) {
		t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks)
	}
}
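// Concretely: the attacker advertises a chain targetBlocks+3 high but
// withholds the two blocks directly below its head (hashes[1] and hashes[2],
// since the hash chain is ordered head->parent), so the first cycle records
// HighestBlock = targetBlocks+3 and then fails. The valid peer is registered
// with hashes[3:], a chain exactly targetBlocks high, and the next cycle
// shrinks the reported height back to that true value.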
t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3) 1635 } 1636 progress <- struct{}{} 1637 pending.Wait() 1638 1639 // Synchronise with a good peer and check that the progress height has been reduced to the true value 1640 tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts) 1641 pending.Add(1) 1642 1643 go func() { 1644 defer pending.Done() 1645 if err := tester.sync("valid", nil, mode); err != nil { 1646 panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) 1647 } 1648 }() 1649 <-starting 1650 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) { 1651 t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks) 1652 } 1653 progress <- struct{}{} 1654 pending.Wait() 1655 1656 // Check final progress after successful sync 1657 if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) { 1658 t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks) 1659 } 1660 } 1661 1662 // This test reproduces an issue where unexpected deliveries would 1663 // block indefinitely if they arrived at the right time. 1664 // We use data driven subtests to manage this so that it will be parallel on its own 1665 // and not with the other tests, avoiding intermittent failures. 
func TestDeliverHeadersHang(t *testing.T) {
	testCases := []struct {
		protocol int
		syncMode SyncMode
	}{
		// {62, FullSync},
		// {63, FullSync},
		// {63, FastSync},
		// {64, FullSync},
		// {64, FastSync},
		// {64, LightSync},
	}
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
		})
	}
}

type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
	pend   sync.WaitGroup
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	// Flood the downloader with 500 unsolicited header deliveries from bogus
	// peers before (and while) serving the real response.
	deliveriesDone := make(chan struct{}, 500)
	for i := 0; i < cap(deliveriesDone); i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		ftp.pend.Add(1)

		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
			ftp.pend.Done()
		}()
	}
	// Deliver the actual requested headers.
	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	master := newTester()
	defer master.terminate()

	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb

		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()

		// Flush all goroutines to prevent messing with subsequent tests
		tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait()
	}
}