github.com/ylsgit/go-ethereum@v1.6.5/eth/downloader/queue.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/rcrowley/go-metrics"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	blockCacheLimit   = 8192 // Maximum number of blocks to cache before throttling the download
	maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peer               // Peer to which the request was sent
	From    uint64              // [eth/62] Requested chain element index (used for skeleton fills only)
	Hashes  map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
	Headers []*types.Header     // [eth/62] Requested headers, sorted by request order
	Time    time.Time           // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	Pending int // Number of data fetches still pending

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

// queue represents hashes that either need fetching or are being fetched
type queue struct {
	mode          SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
	fastSyncPivot uint64   // Block number where the fast sync pivots into archive synchronisation mode

	headerHead common.Hash // [eth/62] Hash of the last queued header to verify order

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerTaskPool  map[uint64]*types.Header       // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
	headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
	headerProced    int                            // [eth/62] Number of headers already processed from the results
	headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
	headerContCh    chan bool                      // [eth/62] Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // [eth/62] Currently pending block (body) retrieval operations
	blockDonePool  map[common.Hash]struct{}      // [eth/62] Set of the completed block (body) fetches

	receiptTaskPool  map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // [eth/63] Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // [eth/63] Currently pending receipt retrieval operations
	receiptDonePool  map[common.Hash]struct{}      // [eth/63] Set of the completed receipt fetches

	stateTaskIndex int                      // [eth/63] Counter indexing the added hashes to ensure prioritised retrieval order
	stateTaskPool  map[common.Hash]int      // [eth/63] Pending node data retrieval tasks, mapping to their priority
	stateTaskQueue *prque.Prque             // [eth/63] Priority queue of the hashes to fetch the node data for
	statePendPool  map[string]*fetchRequest // [eth/63] Currently pending node data retrieval operations

	stateDatabase  ethdb.Database   // [eth/63] Trie database to populate during state reassembly
	stateScheduler *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator
	stateWriters   int              // [eth/63] Number of running state DB writer goroutines

	resultCache  []*fetchResult // Downloaded but not yet delivered fetch results
	resultOffset uint64         // Offset of the first cached fetch result in the block chain

	lock   *sync.Mutex
	active *sync.Cond
	closed bool
}
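
// Illustrative sketch of how a fetchResult's Pending counter gates readiness:
// in full sync only the body is outstanding, while in fast sync a block at or
// below the pivot also awaits its receipts, so Pending starts at 2 and the
// result becomes processable only once both deliveries have decremented it:
//
//	result := &fetchResult{Pending: 2, Header: header} // fast sync, at/below pivot
//	result.Pending--                                   // body delivered
//	result.Pending--                                   // receipts delivered
//	ready := result.Pending == 0                       // now collectable by WaitResults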
// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(stateDb ethdb.Database) *queue {
	lock := new(sync.Mutex)
	return &queue{
		headerPendPool:   make(map[string]*fetchRequest),
		headerContCh:     make(chan bool),
		blockTaskPool:    make(map[common.Hash]*types.Header),
		blockTaskQueue:   prque.New(),
		blockPendPool:    make(map[string]*fetchRequest),
		blockDonePool:    make(map[common.Hash]struct{}),
		receiptTaskPool:  make(map[common.Hash]*types.Header),
		receiptTaskQueue: prque.New(),
		receiptPendPool:  make(map[string]*fetchRequest),
		receiptDonePool:  make(map[common.Hash]struct{}),
		stateTaskPool:    make(map[common.Hash]int),
		stateTaskQueue:   prque.New(),
		statePendPool:    make(map[string]*fetchRequest),
		stateDatabase:    stateDb,
		resultCache:      make([]*fetchResult, blockCacheLimit),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
}

// Reset clears out the queue contents.
func (q *queue) Reset() {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync
	q.fastSyncPivot = 0

	q.headerHead = common.Hash{}

	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)
	q.blockDonePool = make(map[common.Hash]struct{})

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)
	q.receiptDonePool = make(map[common.Hash]struct{})

	q.stateTaskIndex = 0
	q.stateTaskPool = make(map[common.Hash]int)
	q.stateTaskQueue.Reset()
	q.statePendPool = make(map[string]*fetchRequest)
	q.stateScheduler = nil

	q.resultCache = make([]*fetchResult, blockCacheLimit)
	q.resultOffset = 0
}

// Close marks the end of the sync, unblocking WaitResults.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.lock.Unlock()
	q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// PendingNodeData retrieves the number of node data entries pending for retrieval.
func (q *queue) PendingNodeData() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.pendingNodeDataLocked()
}

// pendingNodeDataLocked retrieves the number of node data entries pending for retrieval.
// The caller must hold q.lock.
func (q *queue) pendingNodeDataLocked() int {
	var n int
	if q.stateScheduler != nil {
		n = q.stateScheduler.Pending()
	}
	// Ensure that PendingNodeData doesn't return 0 until all state is written.
	if q.stateWriters > 0 {
		n++
	}
	return n
}
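
// Illustrative sketch of the queue lifecycle as driven by the downloader
// (db, head, pivot and origin are hypothetical; error handling elided):
//
//	q := newQueue(db)                          // create, backed by the state database
//	q.Prepare(origin+1, FastSync, pivot, head) // configure the result cache for this sync
//	q.Schedule(headers, origin+1)              // enqueue body/receipt tasks per header
//	go func() { /* on completion or error elsewhere */ q.Close() }()
//	for results := q.WaitResults(); len(results) > 0; results = q.WaitResults() {
//		// import the completed blocks; the loop ends once the queue is closed
//	}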
// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// InFlightNodeData retrieves whether there are node data entry fetch requests
// currently in flight.
func (q *queue) InFlightNodeData() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.statePendPool)+q.stateWriters > 0
}

// Idle returns whether the queue is fully idle or has some data still inside. This
// method is used by the tester to detect termination events.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool)
	cached := len(q.blockDonePool) + len(q.receiptDonePool)

	if q.stateScheduler != nil {
		queued += q.stateScheduler.Pending()
	}
	return (queued + pending + cached) == 0
}

// FastSyncPivot retrieves the currently used fast sync pivot point.
func (q *queue) FastSyncPivot() uint64 {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.fastSyncPivot
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight block (body) requests
	pending := 0
	for _, request := range q.blockPendPool {
		pending += len(request.Hashes) + len(request.Headers)
	}
	// Throttle if more blocks (bodies) are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.blockDonePool)
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight receipt requests
	pending := 0
	for _, request := range q.receiptPendPool {
		pending += len(request.Headers)
	}
	// Throttle if more receipts are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.receiptDonePool)
}
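
// Illustrative arithmetic for the throttling checks above (numbers
// hypothetical): with the default 8192-slot result cache of which 8000 slots
// already hold completed bodies, at most 192 further block bodies may be in
// flight before ShouldThrottleBlocks trips:
//
//	free := len(q.resultCache) - len(q.blockDonePool) // 8192 - 8000 = 192
//	throttled := pending >= free                      // true once 192 or more are in flight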
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New()
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -float32(index))
	}
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}
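
// Illustrative sketch of the skeleton indexing above, assuming MaxHeaderFetch
// is 192 as defined elsewhere in this package: skeleton[i] is the last header
// of fill batch i, while the task key is that batch's starting block number,
// so batch i covers the range [from+192*i, from+192*(i+1)-1]:
//
//	index := from + uint64(i*MaxHeaderFetch)       // batch start: from, from+192, from+384, ...
//	q.headerTaskQueue.Push(index, -float32(index)) // lowest start index pops first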
// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
			continue
		}
		if _, ok := q.receiptTaskPool[hash]; ok {
			log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			continue
		}
		// Queue the header for content retrieval
		q.blockTaskPool[hash] = header
		q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))

		if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
			// Fast phase of the fast sync, retrieve receipts too
			q.receiptTaskPool[hash] = header
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
			// Pivoting point of the fast sync, switch the state retrieval to this
			log.Debug("Switching state downloads to new block", "number", header.Number, "hash", hash)

			q.stateTaskIndex = 0
			q.stateTaskPool = make(map[common.Hash]int)
			q.stateTaskQueue.Reset()
			for _, req := range q.statePendPool {
				req.Hashes = make(map[common.Hash]int) // Make sure executing requests fail, but don't disappear
			}

			q.stateScheduler = state.NewStateSync(header.Root, q.stateDatabase)
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

// WaitResults retrieves and permanently removes a batch of fetch
// results from the cache. The result slice will be empty if the queue
// has been closed.
func (q *queue) WaitResults() []*fetchResult {
	q.lock.Lock()
	defer q.lock.Unlock()

	nproc := q.countProcessableItems()
	for nproc == 0 && !q.closed {
		q.active.Wait()
		nproc = q.countProcessableItems()
	}
	results := make([]*fetchResult, nproc)
	copy(results, q.resultCache[:nproc])
	if len(results) > 0 {
		// Mark results as done before dropping them from the cache.
		for _, result := range results {
			hash := result.Header.Hash()
			delete(q.blockDonePool, hash)
			delete(q.receiptDonePool, hash)
		}
		// Delete the results from the cache and clear the tail.
		copy(q.resultCache, q.resultCache[nproc:])
		for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
			q.resultCache[i] = nil
		}
		// Advance the expected block number of the first cache entry.
		q.resultOffset += uint64(nproc)
	}
	return results
}

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
	for i, result := range q.resultCache {
		// Don't process incomplete or unavailable items.
		if result == nil || result.Pending > 0 {
			return i
		}
		// Special handling for the fast-sync pivot block:
		if q.mode == FastSync {
			bnum := result.Header.Number.Uint64()
			if bnum == q.fastSyncPivot {
				// If the state of the pivot block is not
				// available yet, we cannot proceed and return 0.
				//
				// Stop before processing the pivot block to ensure that
				// resultCache has space for fsHeaderForceVerify items. Not
				// doing this could leave us unable to download the required
				// amount of headers.
				if i > 0 || len(q.stateTaskPool) > 0 || q.pendingNodeDataLocked() > 0 {
					return i
				}
				for j := 0; j < fsHeaderForceVerify; j++ {
					if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil {
						return i
					}
				}
			}
			// If we're just past the fast sync pivot, stop as well
			// because the following batch needs different insertion.
			// This simplifies handling the switchover in d.process.
			if bnum == q.fastSyncPivot+1 && i > 0 {
				return i
			}
		}
	}
	return len(q.resultCache)
}
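
// A note on the result cache indexing used throughout this file: a header's
// slot is its block number relative to the first block not yet collected, so
// after WaitResults drains nproc items the whole window slides forward by
// nproc and the same formula keeps working:
//
//	index := int(header.Number.Int64() - int64(q.resultOffset)) // slot in q.resultCache
//	// valid only while 0 <= index < len(q.resultCache)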
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -float32(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveNodeData reserves a set of node data hashes for the given peer, skipping
// any previously failed download.
func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
	// Create a task generator to fetch state-fetch tasks if all scheduled ones are done
	generator := func(max int) {
		if q.stateScheduler != nil {
			for _, hash := range q.stateScheduler.Missing(max) {
				q.stateTaskPool[hash] = q.stateTaskIndex
				q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex))
				q.stateTaskIndex++
			}
		}
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, maxInFlightStates)
}
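
// A note on the prque priority convention used throughout this file: the queue
// pops its highest-priority entry first, so headers and skeleton indexes are
// pushed with the negated block number to make the oldest element pop first.
// State hashes follow the same idea via a negated insertion counter, and a
// skipped or cancelled hash is re-pushed with the (already negative) priority
// it was popped with, preserving its original position:
//
//	q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) // earlier-added hashes pop first
//	taskQueue.Push(hash, float32(index))                    // index is the popped (negative) priority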
// reserveHashes reserves a set of hashes for the given peer, skipping previously
// failed ones.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGen func(int), pendPool map[string]*fetchRequest, maxPending int) *fetchRequest {
	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := pendPool[p.id]; ok {
		return nil
	}
	// Calculate an upper limit on the hashes we might fetch (i.e. throttling)
	allowance := maxPending
	if allowance > 0 {
		for _, request := range pendPool {
			allowance -= len(request.Hashes)
		}
	}
	// If there's a task generator, ask it to fill our task queue
	if taskGen != nil && taskQueue.Size() < allowance {
		taskGen(allowance - taskQueue.Size())
	}
	if taskQueue.Empty() {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send := make(map[common.Hash]int)
	skip := make(map[common.Hash]int)

	for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && !taskQueue.Empty(); proc++ {
		hash, priority := taskQueue.Pop()
		if p.Lacks(hash.(common.Hash)) {
			skip[hash.(common.Hash)] = int(priority)
		} else {
			send[hash.(common.Hash)] = int(priority)
		}
	}
	// Merge all the skipped hashes back
	for hash, index := range skip {
		taskQueue.Push(hash, float32(index))
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer:   p,
		Hashes: send,
		Time:   time.Now(),
	}
	pendPool[p.id] = request

	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued that require processing.
func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued that require importing.
func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.ReceiptHash == types.EmptyRootHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
}
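
// A note on the isNoop predicates above: a header whose TxHash and UncleHash
// equal the well-known empty trie/list roots provably has an empty body (and
// likewise ReceiptHash for receipts), so reserveHeaders can complete such
// results locally instead of requesting them from the network:
//
//	empty := header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
//	// if empty, the result slot's Pending counter is decremented with no fetch issued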
// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, nil
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, nil
	}
	// Calculate an upper limit on the items we might fetch (i.e. throttling)
	space := len(q.resultCache) - len(donePool)
	for _, request := range pendPool {
		space -= len(request.Headers)
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)

	progress := false
	for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
		header := taskQueue.PopItem().(*types.Header)

		// If we're the first to request this task, initialise the result container
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 {
			common.Report("index allocation went beyond available resultCache space")
			return nil, false, errInvalidChain
		}
		if q.resultCache[index] == nil {
			components := 1
			if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
				components = 2
			}
			q.resultCache[index] = &fetchResult{
				Pending: components,
				Header:  header,
			}
		}
		// If this fetch task is a noop, skip this fetch operation
		if isNoop(header) {
			donePool[header.Hash()] = struct{}{}
			delete(taskPool, header.Hash())

			space, proc = space-1, proc-1
			q.resultCache[index].Pending--
			progress = true
			continue
		}
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	if progress {
		// Wake WaitResults, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, nil
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request

	return request, progress, nil
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// CancelNodeData aborts a node state data fetch request, returning all pending
// hashes to the task queue.
func (q *queue) CancelNodeData(request *fetchRequest) {
	q.cancel(request, q.stateTaskQueue, q.statePendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request.From > 0 {
		taskQueue.Push(request.From, -float32(request.From))
	}
	for hash, index := range request.Hashes {
		taskQueue.Push(hash, float32(index))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerId string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerId)
	}
	if request, ok := q.receiptPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerId)
	}
	if request, ok := q.statePendPool[peerId]; ok {
		for hash, index := range request.Hashes {
			q.stateTaskQueue.Push(hash, float32(index))
		}
		delete(q.statePendPool, peerId)
	}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// ExpireNodeData checks for in flight node data requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireNodeData(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.statePendPool, q.stateTaskQueue, stateTimeoutMeter)
}
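
// Illustrative sketch of how the expiry checks are meant to be driven by the
// downloader's fetch loops (bodyTTL and the peer-dropping reaction are the
// caller's concern, shown hypothetically):
//
//	for id, fails := range q.ExpireBodies(bodyTTL) {
//		// penalise or drop peer `id`, which sat on `fails` requests past the deadline
//	}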
// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any non satisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -float32(request.From))
			}
			for hash, index := range request.Hashes {
				taskQueue.Push(hash, float32(index))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -float32(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expirations := len(request.Hashes)
			if expirations < len(request.Headers) {
				expirations = len(request.Headers)
			}
			expiries[id] = expirations
		}
	}
	// Remove the expired requests from the pending pool
	for id := range expiries {
		delete(pendPool, id)
	}
	return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			log.Trace("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if headers[i].Hash() != header.ParentHash {
				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
				accepted = false
				break
			}
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}
		q.headerTaskQueue.Push(request.From, -float32(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		return nil
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
			return errInvalidReceipt
		}
		result.Receipts = receiptList[index]
		return nil
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
}
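
// A note on the delivery validation above: each delivered body or receipt list
// is bound to its requested header by recomputing the root commitments, so a
// peer cannot satisfy a request with mismatched data:
//
//	types.DeriveSha(types.Transactions(txs)) == header.TxHash       // transaction trie root
//	types.CalcUncleHash(uncles) == header.UncleHash                 // uncle list hash
//	types.DeriveSha(types.Receipts(receipts)) == header.ReceiptHash // receipt trie root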
// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
	results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		useful   bool
	)
	for i, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Reconstruct the next result if contents match up
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
			failure = errInvalidChain
			break
		}
		if err := reconstruct(header, i, q.resultCache[index]); err != nil {
			failure = err
			break
		}
		donePool[header.Hash()] = struct{}{}
		q.resultCache[index].Pending--
		useful = true
		accepted++

		// Clean up a successful fetch
		request.Headers[i] = nil
		delete(taskPool, header.Hash())
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers {
		if header != nil {
			taskQueue.Push(header, -float32(header.Number.Uint64()))
		}
	}
	// Wake up WaitResults
	if accepted > 0 {
		q.active.Signal()
	}
	// If none of the data was good, it's a stale delivery
	switch {
	case failure == nil || failure == errInvalidChain:
		return accepted, failure
	case useful:
		return accepted, fmt.Errorf("partial failure: %v", failure)
	default:
		return accepted, errStaleDelivery
	}
}
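
// A note on deliver's return contract (sketch; the named errors are the ones
// defined by this package): the accepted count is always reported, while the
// error class tells the caller how to react:
//
//	accepted, err := q.DeliverBodies(id, txLists, uncleLists) // wraps deliver
//	// err == nil or errInvalidChain: propagate as-is (the latter aborts the sync)
//	// "partial failure: ...": some items matched, the peer was at least partly useful
//	// errStaleDelivery: nothing matched, likely a response to an already timed-out request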
// DeliverNodeData injects a node state data retrieval response into the queue.
// The method returns the number of node state items accepted from the delivery.
func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(int, bool, error)) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.statePendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	stateReqTimer.UpdateSince(request.Time)
	delete(q.statePendPool, id)

	// If no data was retrieved, mark the hashes as unavailable for the origin peer
	if len(data) == 0 {
		for hash := range request.Hashes {
			request.Peer.MarkLacking(hash)
		}
	}
	// Iterate over the downloaded data and verify each of them
	errs := make([]error, 0)
	process := []trie.SyncResult{}
	for _, blob := range data {
		// Skip any state trie entries that were not requested
		hash := common.BytesToHash(crypto.Keccak256(blob))
		if _, ok := request.Hashes[hash]; !ok {
			errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
			continue
		}
		// Inject the next state trie item into the processing queue
		process = append(process, trie.SyncResult{Hash: hash, Data: blob})
		delete(request.Hashes, hash)
		delete(q.stateTaskPool, hash)
	}
	// Return all failed or missing fetches to the queue
	for hash, index := range request.Hashes {
		q.stateTaskQueue.Push(hash, float32(index))
	}
	if q.stateScheduler == nil {
		return 0, errNoFetchesPending
	}

	// Run valid nodes through the trie download scheduler. It writes completed nodes to a
	// batch, which is committed asynchronously. This may lead to over-fetches because the
	// scheduler treats everything as written after Process has returned, but it's
	// unlikely to be an issue in practice.
	batch := q.stateDatabase.NewBatch()
	progressed, nproc, procerr := q.stateScheduler.Process(process, batch)
	q.stateWriters += 1
	go func() {
		if procerr == nil {
			nproc = len(process)
			procerr = batch.Write()
		}
		// Return processing errors through the callback so the sync gets canceled. The
		// number of writers is decremented prior to the call so PendingNodeData will
		// return zero when the callback runs.
		q.lock.Lock()
		q.stateWriters -= 1
		q.lock.Unlock()
		callback(nproc, progressed, procerr)
		// Wake up WaitResults after the state has been written because it might be
		// waiting for completion of the pivot block's state download.
		q.active.Signal()
	}()

	// If none of the data items were good, it's a stale delivery
	switch {
	case len(errs) == 0:
		return len(process), nil
	case len(errs) == len(request.Hashes):
		return len(process), errStaleDelivery
	default:
		return len(process), fmt.Errorf("multiple failures: %v", errs)
	}
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	if q.resultOffset < offset {
		q.resultOffset = offset
	}
	q.fastSyncPivot = pivot
	q.mode = mode

	// If this is a long-running fast sync, also start up a head state retrieval immediately
	if mode == FastSync && pivot > 0 {
		q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase)
	}
}
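
// Illustrative sketch of Prepare at sync start (block numbers hypothetical):
// for a fresh fast sync beginning at block 4,000,000 with the pivot chosen at
// 4,199,936, the queue is configured before any fetcher runs, and the state
// scheduler is seeded from the head header's state root immediately:
//
//	q := newQueue(db) // db: some ethdb.Database
//	q.Prepare(4000001, FastSync, 4199936, head)
//	// q.resultOffset == 4000001 on a fresh queue; stateScheduler now tracks head.Root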