gitlab.com/aquachain/aquachain@v1.17.16-rc3.0.20221018032414-e3ddf1e1c055/aqua/downloader/downloader_test.go

// Copyright 2015 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"gitlab.com/aquachain/aquachain/aqua/event"
	"gitlab.com/aquachain/aquachain/aquadb"
	"gitlab.com/aquachain/aquachain/common"
	"gitlab.com/aquachain/aquachain/consensus/aquahash"
	"gitlab.com/aquachain/aquachain/core"
	"gitlab.com/aquachain/aquachain/core/types"
	"gitlab.com/aquachain/aquachain/crypto"
	"gitlab.com/aquachain/aquachain/params"
	"gitlab.com/aquachain/aquachain/trie"
)

var (
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
)

// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond

	go func() {
		t1 := time.Now()
		for {
			fmt.Printf("(UTC) %s - simulated downloader test running for %s...\n", time.Now().UTC(), time.Since(t1))
			<-time.After(time.Second * 30)
		}
	}()
}

// downloadTester is a test simulator for mocking out a local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block    // Genesis block used by the tester and peers
	stateDb aquadb.Database // Database used by the tester for syncing from peers
	peerDb  aquadb.Database // Database of the peers containing all data

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	peerHashes   map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains

	peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return

	lock sync.RWMutex

	chainConfig *params.ChainConfig
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb := aquadb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))

	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
		chainConfig:       params.TestChainConfig,
	}
	tester.stateDb = aquadb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)

	return tester
}

func (dl *downloadTester) Config() *params.ChainConfig {
	return dl.chainConfig
}

func (dl *downloadTester) GetBlockVersion(height *big.Int) params.HeaderVersion {
	return params.TestChainConfig.GetBlockVersion(height)
}

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, aquahash.NewFaker(), dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If a heavy chain is requested, delay blocks to raise difficulty
		if heavy {
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == dl.genesis && i%3 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i > 0 && i%5 == 0 {
			block.AddUncle(&types.Header{
				ParentHash: block.PrevBlock(i - 1).Hash(),
				Number:     big.NewInt(block.Number().Int64() - 1),
			})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	receiptm := make(map[common.Hash]types.Receipts, n+1)
	receiptm[parent.Hash()] = parentReceipts

	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
		receiptm[b.Hash()] = receipts[i]
	}
	return hashes, headerm, blockm, receiptm
}
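
// sketchTesterLifecycle is an illustrative, unused sketch added for
// documentation (not part of the original suite): it shows the typical
// lifecycle the helpers above are built around: create a tester, generate a
// chain rooted at the genesis, register it as a peer, synchronise against it,
// then check the resulting local chain.
func sketchTesterLifecycle(t *testing.T) {
	tester := newTester()
	defer tester.terminate()

	// A chain shorter than the block cache keeps the sketch fast.
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Register the generated chain under a peer id and sync from it.
	tester.newPeer("sketch peer", 65, hashes, headers, blocks, receipts)
	if err := tester.sync("sketch peer", nil, FullSync); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	// The local chain should now hold the generated blocks plus the genesis.
	assertOwnChain(t, tester, targetBlocks+1)
}
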
// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)

	// Create the forks, making the second heavier if non-balanced forks were requested
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)

	heavy := false
	if !balanced {
		heavy = true
	}
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	for hash, receipt := range receipts {
		receipts1[hash] = receipt
		receipts2[hash] = receipt
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain. Only
// blocks whose state root is present in stateDb count, mirroring the full-sync
// head of a real chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb), 0)
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
		return 0, errors.New("unknown parent")
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, errors.New("unknown parent")
		}
	}
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.Hash()]; ok {
			continue
		}
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
		dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
		}
		if _, ok := dl.ownHeaders[block.Hash()]; !ok {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = receipts[i]
	}
	return len(blocks), nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := len(hashes) - 1; i >= 0; i-- {
		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
		}
		delete(dl.ownChainTd, hashes[i])
		delete(dl.ownHeaders, hashes[i])
		delete(dl.ownReceipts, hashes[i])
		delete(dl.ownBlocks, hashes[i])
	}
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl: dl, id: id, delay: delay})
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
		dl.peerMissingStates[id] = make(map[common.Hash]bool)

		genesis := hashes[len(hashes)-1]
		if header := headers[genesis]; header != nil {
			dl.peerHeaders[id][genesis] = header
			dl.peerChainTds[id][genesis] = header.Difficulty
		}
		if block := blocks[genesis]; block != nil {
			dl.peerBlocks[id][genesis] = block
			dl.peerChainTds[id][genesis] = block.Difficulty()
		}

		for i := len(hashes) - 2; i >= 0; i-- {
			hash := hashes[i]

			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
			if receipt, ok := receipts[hash]; ok {
				dl.peerReceipts[id][hash] = receipt
			}
		}
	}
	return err
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl    *downloadTester
	id    string
	delay time.Duration
	lock  sync.RWMutex
}

// setDelay is a thread safe setter for the network delay value.
//func (dlp *downloadTesterPeer) setDelay(delay time.Duration) {
//	dlp.lock.Lock()
//	defer dlp.lock.Unlock()
//
//	dlp.delay = delay
//}

// waitDelay is a thread safe way to sleep for the configured time.
func (dlp *downloadTesterPeer) waitDelay() {
	dlp.lock.RLock()
	delay := dlp.delay
	dlp.lock.RUnlock()

	time.Sleep(delay)
}

// Head retrieves the peer's current head hash and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	return dlp.dl.peerHashes[dlp.id][0], nil
}
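
// sketchSlowPeer is an illustrative, unused sketch added for documentation
// (not part of the original suite): newSlowPeer is the hook for simulating
// slow network IO, here by making every request served by the peer wait an
// extra 500ms before being processed.
func sketchSlowPeer(tester *downloadTester, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
	return tester.newSlowPeer("slow peer", 65, hashes, headers, blocks, receipts, 500*time.Millisecond)
}
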
// RequestHeadersByHash retrieves a batch of headers starting at a hashed
// origin, associated with a particular peer in the download tester. It is used
// to fetch batches of headers from that peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	// Find the canonical number of the hash
	dlp.dl.lock.RLock()
	number := uint64(0)
	for num, hash := range dlp.dl.peerHashes[dlp.id] {
		if hash == origin {
			number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
			break
		}
	}
	dlp.dl.lock.RUnlock()

	// Use the absolute header fetcher to satisfy the query
	return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
}

// RequestHeadersByNumber retrieves a batch of headers starting at a numbered
// origin, associated with a particular peer in the download tester. It is used
// to fetch batches of headers from that peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	// Gather the next batch of headers
	hashes := dlp.dl.peerHashes[dlp.id]
	headers := dlp.dl.peerHeaders[dlp.id]
	result := make([]*types.Header, 0, amount)
	for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
		if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
			result = append(result, header)
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	go func() {
		time.Sleep(time.Millisecond)
		dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	}()
	return nil
}

// RequestBodies retrieves a batch of block bodies, associated with a particular
// peer in the download tester. It is used to retrieve batches of block bodies
// from the requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	blocks := dlp.dl.peerBlocks[dlp.id]

	transactions := make([][]*types.Transaction, 0, len(hashes))
	uncles := make([][]*types.Header, 0, len(hashes))

	for _, hash := range hashes {
		if block, ok := blocks[hash]; ok {
			transactions = append(transactions, block.Transactions())
			uncles = append(uncles, block.Uncles())
		}
	}
	go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)

	return nil
}

// RequestReceipts retrieves a batch of block receipts, associated with a
// particular peer in the download tester. It is used to retrieve batches of
// block receipts from the requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	receipts := dlp.dl.peerReceipts[dlp.id]

	results := make([][]*types.Receipt, 0, len(hashes))
	for _, hash := range hashes {
		if receipt, ok := receipts[hash]; ok {
			results = append(results, receipt)
		}
	}
	go dlp.dl.downloader.DeliverReceipts(dlp.id, results)

	return nil
}
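
// sketchHashIndex is an illustrative, unused helper added for documentation
// (not part of the original suite): the per-peer hash slices used by the
// Request* methods above are ordered head first and genesis last, so block
// number n of a chain with len(hashes) entries lives at index
// len(hashes)-1-n. RequestHeadersByHash and RequestHeadersByNumber rely on
// exactly this mapping when translating origins into slice indices.
func sketchHashIndex(hashes []common.Hash, number uint64) common.Hash {
	return hashes[len(hashes)-1-int(number)]
}
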
// RequestNodeData retrieves a batch of node state data, associated with a
// particular peer in the download tester. It is used to retrieve batches of
// node state data from the requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.waitDelay()

	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.dl.peerMissingStates[dlp.id][hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)

	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks

	if receipts < 0 {
		receipts = 1
	}
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common - fsMinFullBlocks
	}
	if tester.downloader.mode == FullSync {
		receipts = 1
	}
	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
	// Verify the state trie too for fast syncs
	/*if tester.downloader.mode == FastSync {
		pivot := uint64(0)
		var index int
		if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common {
			index = pivot
		} else {
			index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot)
		}
		if index > 0 {
			if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, state.NewDatabase(trie.NewDatabase(tester.stateDb))); statedb == nil || err != nil {
				t.Fatalf("state reconstruction failed: %v", err)
			}
		}
	}*/
}
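
// sketchForkedChainItems is an illustrative, unused helper added for
// documentation (not part of the original suite): it mirrors the header and
// block arithmetic of assertOwnForkedChain above. The first fork contributes
// its full length; every further fork only contributes the blocks past the
// shared prefix. For example, common=5 and lengths=[8, 9] give 8 + (9-5) = 12
// expected headers and blocks.
func sketchForkedChainItems(common int, lengths []int) int {
	items := lengths[0]
	for _, length := range lengths[1:] {
		items += length - common
	}
	return items
}
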
// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonicalSynchronisation(t, 65, FullSync) }

// func TestCanonicalSynchronisation65Fast(t *testing.T) { testCanonicalSynchronisation(t, 65, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }

// func TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()

	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheItems
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					//if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot {
					cached = receipts
					//}
				}
			}
			frozen = int(atomic.LoadUint32(&blocked))
			retrieved = len(tester.ownBlocks)
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync65Full(t *testing.T) { testForkedSync(t, 65, FullSync) }
func TestForkedSync65Fast(t *testing.T) { testForkedSync(t, 65, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync65Full(t *testing.T) { testHeavyForkedSync(t, 65, FullSync) }

func TestHeavyForkedSync65Fast(t *testing.T) { testHeavyForkedSync(t, 65, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync65Full(t *testing.T) { testBoundedForkedSync(t, 65, FullSync) }
func TestBoundedForkedSync65Fast(t *testing.T) { testBoundedForkedSync(t, 65, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync65Full(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FullSync) }
func TestBoundedHeavyForkedSync65Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)

	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader64(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader65(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers, bodies nor receipts are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel65Fast(t *testing.T) { testCancel(t, 65, FastSync) }
func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheItems - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation65Full(t *testing.T) { testMultiSynchronisation(t, 65, FullSync) }
func TestMultiSynchronisation65Fast(t *testing.T) { testMultiSynchronisation(t, 65, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheItems:], headers, blocks, receipts)
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation65Full(t *testing.T) { testMultiProtoSync(t, 65, FullSync) }

// func TestMultiProtoSynchronisation65Fast(t *testing.T) { testMultiProtoSync(t, 65, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Create peers of every type
	tester.newPeer("peer 65", 65, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Check that no peers have been dropped off
	for _, version := range []int{65, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit65Full(t *testing.T) { testEmptyShortCircuit(t, 65, FullSync) }
func TestEmptyShortCircuit65Fast(t *testing.T) { testEmptyShortCircuit(t, 65, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	targetBlocks := 2*blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range receipts {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack65Full(t *testing.T) { testMissingHeaderAttack(t, 65, FullSync) }

func TestMissingHeaderAttack65Fast(t *testing.T) { testMissingHeaderAttack(t, 65, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["attack"], hashes[missing])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack65Full(t *testing.T) { testShiftedHeaderAttack(t, 65, FullSync) }
func TestShiftedHeaderAttack65Fast(t *testing.T) { testShiftedHeaderAttack(t, 65, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt a full sync with an attacker feeding shifted headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
	delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing])

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts)
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i <= len(hashes); i++ {
			delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i])
		}
		tester.downloader.syncInitHook = nil
	}

	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last
	// rollback should also disable fast syncing for this process, verify that we
	// did a fresh full sync. Note, we can't assert anything about the receipts
	// since we won't purge the database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != len(headers) {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers))
	}
	if bs := len(tester.ownBlocks); bs != len(blocks) {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks))
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack65Full(t *testing.T) { testHighTDStarvationAttack(t, 65, FullSync) }

func TestHighTDStarvationAttack65Fast(t *testing.T) { testHighTDStarvationAttack(t, 65, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)

	if err := tester.sync("attack", big.NewInt(100000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{ErrBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelReceiptFetch, false},      // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderProcessing, false},  // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peerHashes[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
1348 func TestSyncProgress65Full(t *testing.T) { testSyncProgress(t, 65, FullSync) } 1349 func TestSyncProgress65Fast(t *testing.T) { testSyncProgress(t, 65, FastSync) } 1350 func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) } 1351 func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) } 1352 1353 func testSyncProgress(t *testing.T, protocol int, mode SyncMode) { 1354 t.Parallel() 1355 1356 tester := newTester() 1357 defer tester.terminate() 1358 1359 // Create a small enough block chain to download 1360 targetBlocks := blockCacheItems - 15 1361 hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false) 1362 1363 // Set a sync init hook to catch progress changes 1364 starting := make(chan struct{}) 1365 progress := make(chan struct{}) 1366 1367 tester.downloader.syncInitHook = func(origin, latest uint64) { 1368 starting <- struct{}{} 1369 <-progress 1370 } 1371 // Retrieve the sync progress and ensure they are zero (pristine sync) 1372 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 { 1373 t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0) 1374 } 1375 // Synchronise half the blocks and check initial progress 1376 tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts) 1377 pending := new(sync.WaitGroup) 1378 pending.Add(1) 1379 1380 go func() { 1381 defer pending.Done() 1382 if err := tester.sync("peer-half", nil, mode); err != nil { 1383 panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) 1384 } 1385 }() 1386 <-starting 1387 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks/2+1) { 1388 t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks/2+1) 1389 } 1390 progress <- struct{}{} 1391 pending.Wait() 1392 1393 // Synchronise all the blocks and check continuation progress 1394 tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts) 1395 pending.Add(1) 1396 1397 go func() { 1398 defer pending.Done() 1399 if err := tester.sync("peer-full", nil, mode); err != nil { 1400 panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) 1401 } 1402 }() 1403 <-starting 1404 if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks/2+1) || progress.HighestBlock != uint64(targetBlocks) { 1405 t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks/2+1, targetBlocks) 1406 } 1407 progress <- struct{}{} 1408 pending.Wait() 1409 1410 // Check final progress after successful sync 1411 if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(targetBlocks/2+1) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) { 1412 t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2+1, targetBlocks, targetBlocks) 1413 } 1414 } 1415 1416 // Tests that synchronisation progress (origin block number and highest block 1417 // number) is tracked 
and updated correctly in case of a fork (or manual head
1418 // reversal).
1419 func TestForkedSyncProgress65Full(t *testing.T) { testForkedSyncProgress(t, 65, FullSync) }
1420 func TestForkedSyncProgress65Fast(t *testing.T) { testForkedSyncProgress(t, 65, FastSync) }
1421 func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
1422 func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) }
1423 
1424 func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
1425 t.Parallel()
1426 
1427 tester := newTester()
1428 defer tester.terminate()
1429 
1430 // Create a forked chain to simulate origin reversal
1431 common, fork := MaxHashFetch, 2*MaxHashFetch
1432 hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
1433 
1434 // Set a sync init hook to catch progress changes
1435 starting := make(chan struct{})
1436 progress := make(chan struct{})
1437 
1438 tester.downloader.syncInitHook = func(origin, latest uint64) {
1439 starting <- struct{}{}
1440 <-progress
1441 }
1442 // Retrieve the sync progress and ensure they are zero (pristine sync)
1443 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
1444 t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
1445 }
1446 // Synchronise with one of the forks and check progress
1447 tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
1448 pending := new(sync.WaitGroup)
1449 pending.Add(1)
1450 
1451 go func() {
1452 defer pending.Done()
1453 if err := tester.sync("fork A", nil, mode); err != nil {
1454 panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
1455 }
1456 }()
1457 <-starting
1458 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(len(hashesA)-1) {
1459 t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, len(hashesA)-1)
1460 }
1461 progress <- struct{}{}
1462 pending.Wait()
1463 
1464 // Simulate a successful sync above the fork
1465 tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
1466 
1467 // Synchronise with the second fork and check progress resets
1468 tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
1469 pending.Add(1)
1470 
1471 go func() {
1472 defer pending.Done()
1473 if err := tester.sync("fork B", nil, mode); err != nil {
1474 panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
1475 }
1476 }()
1477 <-starting
1478 if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesA)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
1479 t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesA)-1, len(hashesB)-1)
1480 }
1481 progress <- struct{}{}
1482 pending.Wait()
1483 
1484 // Check final progress after successful sync
1485 if progress := tester.downloader.Progress(); progress.StartingBlock != uint64(common) || progress.CurrentBlock != uint64(len(hashesB)-1) || progress.HighestBlock != uint64(len(hashesB)-1) {
1486 t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, common, len(hashesB)-1, len(hashesB)-1)
1487 }
1488 }
1489 
1490 // Tests that if synchronisation is aborted due to some failure, then the progress
1491 // origin is not updated in the next sync cycle, as it should be considered the
1492 // continuation of the previous sync and not a new instance.
1493 func TestFailedSyncProgress65Full(t *testing.T) { testFailedSyncProgress(t, 65, FullSync) }
1494 func TestFailedSyncProgress65Fast(t *testing.T) { testFailedSyncProgress(t, 65, FastSync) }
1495 func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
1496 func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }
1497 
1498 func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
1499 t.Parallel()
1500 
1501 tester := newTester()
1502 defer tester.terminate()
1503 
1504 // Create a small enough block chain to download
1505 targetBlocks := blockCacheItems - 15
1506 hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
1507 
1508 // Set a sync init hook to catch progress changes
1509 starting := make(chan struct{})
1510 progress := make(chan struct{})
1511 
1512 tester.downloader.syncInitHook = func(origin, latest uint64) {
1513 starting <- struct{}{}
1514 <-progress
1515 }
1516 // Retrieve the sync progress and ensure they are zero (pristine sync)
1517 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 {
1518 t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0)
1519 }
1520 // Attempt a full sync with a faulty peer
1521 tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
1522 missing := targetBlocks / 2
1523 delete(tester.peerHeaders["faulty"], hashes[missing])
1524 delete(tester.peerBlocks["faulty"], hashes[missing])
1525 delete(tester.peerReceipts["faulty"], hashes[missing])
1526 
1527 pending := new(sync.WaitGroup)
1528 pending.Add(1)
1529 
1530 go func() {
1531 defer pending.Done()
1532 if err := tester.sync("faulty", nil, mode); err == nil {
1533 panic("succeeded faulty synchronisation")
1534 }
1535 }()
1536 <-starting
1537 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks) {
1538 t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks)
1539 }
1540 progress <- struct{}{}
1541 pending.Wait()
1542 
1543 // Synchronise with a good peer and check that the progress origin remains the same after a failure
1544 tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
1545 pending.Add(1)
1546 
1547 go func() {
1548 defer pending.Done()
1549 if err := tester.sync("valid", nil, mode); err != nil {
1550 panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
1551 }
1552 }()
1553 <-starting
1554 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks/2) || progress.HighestBlock != uint64(targetBlocks) {
1555 t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks/2, targetBlocks)
1556 }
1557 progress <- struct{}{}
1558 pending.Wait() 1559 1560 // Check final progress after successful sync 1561 if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks/2) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) { 1562 t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks/2, targetBlocks, targetBlocks) 1563 } 1564 } 1565 1566 // Tests that if an attacker fakes a chain height, after the attack is detected, 1567 // the progress height is successfully reduced at the next sync invocation. 1568 func TestFakedSyncProgress65Full(t *testing.T) { testFakedSyncProgress(t, 65, FullSync) } 1569 func TestFakedSyncProgress65Fast(t *testing.T) { testFakedSyncProgress(t, 65, FastSync) } 1570 func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) } 1571 func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) } 1572 1573 func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) { 1574 t.Parallel() 1575 1576 tester := newTester() 1577 defer tester.terminate() 1578 1579 // Create a small block chain 1580 targetBlocks := blockCacheItems - 15 1581 hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false) 1582 1583 // Set a sync init hook to catch progress changes 1584 starting := make(chan struct{}) 1585 progress := make(chan struct{}) 1586 1587 tester.downloader.syncInitHook = func(origin, latest uint64) { 1588 starting <- struct{}{} 1589 <-progress 1590 } 1591 // Retrieve the sync progress and ensure they are zero (pristine sync) 1592 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != 0 { 1593 t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, 0) 1594 } 1595 // Create and sync with an attacker that promises a higher chain than available 1596 tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) 1597 for i := 1; i < 3; i++ { 1598 delete(tester.peerHeaders["attack"], hashes[i]) 1599 delete(tester.peerBlocks["attack"], hashes[i]) 1600 delete(tester.peerReceipts["attack"], hashes[i]) 1601 } 1602 1603 pending := new(sync.WaitGroup) 1604 pending.Add(1) 1605 1606 go func() { 1607 defer pending.Done() 1608 if err := tester.sync("attack", nil, mode); err == nil { 1609 panic("succeeded attacker synchronisation") 1610 } 1611 }() 1612 <-starting 1613 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock != 0 || progress.HighestBlock != uint64(targetBlocks+3) { 1614 t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, 0, targetBlocks+3) 1615 } 1616 progress <- struct{}{} 1617 pending.Wait() 1618 1619 // Synchronise with a good peer and check that the progress height has been reduced to the true value 1620 tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts) 1621 pending.Add(1) 1622 1623 go func() { 1624 defer pending.Done() 1625 if err := tester.sync("valid", nil, mode); err != nil { 1626 panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) 1627 } 1628 }() 1629 <-starting 1630 if progress := tester.downloader.Progress(); progress.StartingBlock != 0 || progress.CurrentBlock > uint64(targetBlocks) || 
progress.HighestBlock != uint64(targetBlocks) { 1631 t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, 0, targetBlocks, targetBlocks) 1632 } 1633 progress <- struct{}{} 1634 pending.Wait() 1635 1636 // Check final progress after successful sync 1637 if progress := tester.downloader.Progress(); progress.StartingBlock > uint64(targetBlocks) || progress.CurrentBlock != uint64(targetBlocks) || progress.HighestBlock != uint64(targetBlocks) { 1638 t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", progress.StartingBlock, progress.CurrentBlock, progress.HighestBlock, targetBlocks, targetBlocks, targetBlocks) 1639 } 1640 } 1641 1642 // This test reproduces an issue where unexpected deliveries would 1643 // block indefinitely if they arrived at the right time. 1644 // We use data driven subtests to manage this so that it will be parallel on its own 1645 // and not with the other tests, avoiding intermittent failures. 1646 func TestDeliverHeadersHang(t *testing.T) { 1647 testCases := []struct { 1648 protocol int 1649 syncMode SyncMode 1650 }{ 1651 //{65, FullSync}, 1652 //{65, FastSync}, 1653 {64, FullSync}, 1654 {64, FastSync}, 1655 } 1656 for _, tc := range testCases { 1657 t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) { 1658 testDeliverHeadersHang(t, tc.protocol, tc.syncMode) 1659 }) 1660 } 1661 } 1662 1663 type floodingTestPeer struct { 1664 peer Peer 1665 tester *downloadTester 1666 pend sync.WaitGroup 1667 } 1668 1669 func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() } 1670 func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error { 1671 return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse) 1672 } 1673 func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error { 1674 return ftp.peer.RequestBodies(hashes) 1675 } 1676 func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error { 1677 return ftp.peer.RequestReceipts(hashes) 1678 } 1679 func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error { 1680 return ftp.peer.RequestNodeData(hashes) 1681 } 1682 1683 func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error { 1684 deliveriesDone := make(chan struct{}, 500) 1685 for i := 0; i < cap(deliveriesDone); i++ { 1686 peer := fmt.Sprintf("fake-peer%d", i) 1687 ftp.pend.Add(1) 1688 1689 go func() { 1690 ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}}) 1691 deliveriesDone <- struct{}{} 1692 ftp.pend.Done() 1693 }() 1694 } 1695 // Deliver the actual requested headers. 1696 go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse) 1697 // None of the extra deliveries should block. 
1698 timeout := time.After(60 * time.Second) 1699 for i := 0; i < cap(deliveriesDone); i++ { 1700 select { 1701 case <-deliveriesDone: 1702 case <-timeout: 1703 panic("blocked") 1704 } 1705 } 1706 return nil 1707 } 1708 1709 func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) { 1710 t.Parallel() 1711 1712 master := newTester() 1713 defer master.terminate() 1714 1715 hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false) 1716 for i := 0; i < 200; i++ { 1717 tester := newTester() 1718 tester.peerDb = master.peerDb 1719 1720 tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) 1721 // Whenever the downloader requests headers, flood it with 1722 // a lot of unrequested header deliveries. 1723 tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{ 1724 peer: tester.downloader.peers.peers["peer"].peer, 1725 tester: tester, 1726 } 1727 if err := tester.sync("peer", nil, mode); err != nil { 1728 t.Errorf("test %d: sync failed: %v", i, err) 1729 } 1730 tester.terminate() 1731 1732 // Flush all goroutines to prevent messing with subsequent tests 1733 tester.downloader.peers.peers["peer"].peer.(*floodingTestPeer).pend.Wait() 1734 } 1735 }
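
// The attack-style tests above share a common skeleton: build a chain with
// makeChain, register a peer through newPeer while withholding part of its
// data, and require that tester.sync fails. The function below is a minimal
// sketch of that skeleton, distilled from testFailedSyncProgress; it is not
// part of the original suite, and the names "testWithheldDataSketch" and
// "withholder" are illustrative only.
func testWithheldDataSketch(t *testing.T, protocol int, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()

	// Build a chain that fits the block cache, as the progress tests do.
	targetBlocks := blockCacheItems - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)

	// Register a peer and withhold the data for one block in the middle.
	tester.newPeer("withholder", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["withholder"], hashes[missing])
	delete(tester.peerBlocks["withholder"], hashes[missing])
	delete(tester.peerReceipts["withholder"], hashes[missing])

	// Synchronising against such a peer is expected to fail.
	if err := tester.sync("withholder", nil, mode); err == nil {
		t.Fatalf("synchronisation succeeded against a peer withholding data")
	}
}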