github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/eth/downloader/queue.go

// Copyright 2015 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/common"
	"github.com/SmartMeshFoundation/Spectrum/core/types"
	"github.com/SmartMeshFoundation/Spectrum/log"
	"github.com/rcrowley/go-metrics"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var blockCacheLimit = 8192 // Maximum number of blocks to cache before throttling the download

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection     // Peer to which the request was sent
	From    uint64              // [eth/62] Requested chain element index (used for skeleton fills only)
	Hashes  map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
	Headers []*types.Header     // [eth/62] Requested headers, sorted by request order
	Time    time.Time           // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
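// Pending counts the data classes still outstanding: one for the block body and,
// for fast-sync blocks at or below the pivot, one more for the receipts.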
type fetchResult struct {
	Pending int // Number of data fetches still pending

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

// queue represents hashes that either need fetching or are being fetched
type queue struct {
	mode          SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
	fastSyncPivot uint64   // Block number where the fast sync pivots into archive synchronisation mode

	headerHead common.Hash // [eth/62] Hash of the last queued header to verify order

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerTaskPool  map[uint64]*types.Header       // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
	headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
	headerProced    int                            // [eth/62] Number of headers already processed from the results
	headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
	headerContCh    chan bool                      // [eth/62] Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // [eth/62] Currently pending block (body) retrieval operations
	blockDonePool  map[common.Hash]struct{}      // [eth/62] Set of the completed block (body) fetches

	receiptTaskPool  map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // [eth/63] Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // [eth/63] Currently pending receipt retrieval operations
	receiptDonePool  map[common.Hash]struct{}      // [eth/63] Set of the completed receipt fetches

	resultCache  []*fetchResult // Downloaded but not yet delivered fetch results
	resultOffset uint64         // Offset of the first cached fetch result in the block chain

	lock   *sync.Mutex
	active *sync.Cond
	closed bool
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
	lock := new(sync.Mutex)
	return &queue{
		headerPendPool:   make(map[string]*fetchRequest),
		headerContCh:     make(chan bool),
		blockTaskPool:    make(map[common.Hash]*types.Header),
		blockTaskQueue:   prque.New(),
		blockPendPool:    make(map[string]*fetchRequest),
		blockDonePool:    make(map[common.Hash]struct{}),
		receiptTaskPool:  make(map[common.Hash]*types.Header),
		receiptTaskQueue: prque.New(),
		receiptPendPool:  make(map[string]*fetchRequest),
		receiptDonePool:  make(map[common.Hash]struct{}),
		resultCache:      make([]*fetchResult, blockCacheLimit),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
}

// Reset clears out the queue contents.
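// The header skeleton state (headerTaskPool, headerTaskQueue, headerResults) is
// not touched here; it is reinitialised by the next call to ScheduleSkeleton.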
func (q *queue) Reset() {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync
	q.fastSyncPivot = 0

	q.headerHead = common.Hash{}

	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)
	q.blockDonePool = make(map[common.Hash]struct{})

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)
	q.receiptDonePool = make(map[common.Hash]struct{})

	q.resultCache = make([]*fetchResult, blockCacheLimit)
	q.resultOffset = 0
}

// Close marks the end of the sync, unblocking WaitResults.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.lock.Unlock()
	q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle reports whether the queue is fully idle or still has block data queued,
// pending or cached.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)
	cached := len(q.blockDonePool) + len(q.receiptDonePool)

	return (queued + pending + cached) == 0
}

// FastSyncPivot retrieves the currently used fast sync pivot point.
func (q *queue) FastSyncPivot() uint64 {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.fastSyncPivot
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
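// Throttling kicks in once the number of bodies currently requested from peers
// reaches the free space left in the result cache. For example, with the default
// blockCacheLimit of 8192 and 100 completed bodies parked in blockDonePool, at
// most 8092 further bodies may be in flight before reservations are held back.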
func (q *queue) ShouldThrottleBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight block (body) requests
	pending := 0
	for _, request := range q.blockPendPool {
		pending += len(request.Hashes) + len(request.Headers)
	}
	// Throttle if more blocks (bodies) are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.blockDonePool)
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Calculate the currently in-flight receipt requests
	pending := 0
	for _, request := range q.receiptPendPool {
		pending += len(request.Headers)
	}
	// Throttle if more receipts are in-flight than free space in the cache
	return pending >= len(q.resultCache)-len(q.receiptDonePool)
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New()
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -float32(index))
	}
}

// RetrieveHeaders retrieves the assembled header chain based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
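// The headers must form a contiguous chain starting at block number from;
// scheduling stops at the first header that breaks the numbering or the ancestry.
// Tasks are pushed with their negated block number as priority, so the lowest
// numbered blocks are always reserved first.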
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
			continue
		}
		if _, ok := q.receiptTaskPool[hash]; ok {
			log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			continue
		}
		// Queue the header for content retrieval
		q.blockTaskPool[hash] = header
		q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))

		if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
			// Fast phase of the fast sync, retrieve receipts too
			q.receiptTaskPool[hash] = header
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

// WaitResults retrieves and permanently removes a batch of fetch
// results from the cache. The result slice will be empty if the queue
// has been closed.
func (q *queue) WaitResults() []*fetchResult {
	q.lock.Lock()
	defer q.lock.Unlock()

	nproc := q.countProcessableItems()
	for nproc == 0 && !q.closed {
		q.active.Wait()
		nproc = q.countProcessableItems()
	}
	results := make([]*fetchResult, nproc)
	copy(results, q.resultCache[:nproc])
	if len(results) > 0 {
		// Mark results as done before dropping them from the cache.
		for _, result := range results {
			hash := result.Header.Hash()
			delete(q.blockDonePool, hash)
			delete(q.receiptDonePool, hash)
		}
		// Delete the results from the cache and clear the tail.
		copy(q.resultCache, q.resultCache[nproc:])
		for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
			q.resultCache[i] = nil
		}
		// Advance the expected block number of the first cache entry.
		q.resultOffset += uint64(nproc)
	}
	return results
}

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
	for i, result := range q.resultCache {
		// Don't process incomplete or unavailable items.
		if result == nil || result.Pending > 0 {
			return i
		}
		// Stop before processing the pivot block to ensure that
		// resultCache has space for fsHeaderForceVerify items. Not
		// doing this could leave us unable to download the required
		// amount of headers.
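		// In other words, the pivot result is only handed out once the
		// fsHeaderForceVerify slots following it are already populated.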
		if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot {
			for j := 0; j < fsHeaderForceVerify; j++ {
				if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil {
					return i
				}
			}
		}
	}
	return len(q.resultCache)
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -float32(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.ReceiptHash == types.EmptyRootHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
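// Headers for which isNoop reports true (e.g. empty bodies or empty receipt
// lists) are completed immediately without a network request; the returned flag
// reports whether any such progress was made.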
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, nil
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, nil
	}
	// Calculate an upper limit on the items we might fetch (i.e. throttling)
	space := len(q.resultCache) - len(donePool)
	for _, request := range pendPool {
		space -= len(request.Headers)
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)

	progress := false
	for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
		header := taskQueue.PopItem().(*types.Header)

		// If we're the first to request this task, initialise the result container
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 {
			common.Report("index allocation went beyond available resultCache space")
			return nil, false, errInvalidChain
		}
		if q.resultCache[index] == nil {
			components := 1
			if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
				components = 2
			}
			q.resultCache[index] = &fetchResult{
				Pending: components,
				Header:  header,
			}
		}
		// If this fetch task is a noop, skip this fetch operation
		if isNoop(header) {
			donePool[header.Hash()] = struct{}{}
			delete(taskPool, header.Hash())

			space, proc = space-1, proc-1
			q.resultCache[index].Pending--
			progress = true
			continue
		}
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	if progress {
		// Wake WaitResults, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, nil
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request

	return request, progress, nil
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
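// It is the shared implementation behind CancelHeaders, CancelBodies and
// CancelReceipts, and removes the request's peer from the given pending pool.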
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request.From > 0 {
		taskQueue.Push(request.From, -float32(request.From))
	}
	for hash, index := range request.Hashes {
		taskQueue.Push(hash, float32(index))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerId string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerId)
	}
	if request, ok := q.receiptPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerId)
	}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
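// The returned map is keyed by peer id and holds the number of tasks in the
// expired request, which the caller uses when penalising the responsible peers.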
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any non satisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -float32(request.From))
			}
			for hash, index := range request.Hashes {
				taskQueue.Push(hash, float32(index))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -float32(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expirations := len(request.Hashes)
			if expirations < len(request.Headers) {
				expirations = len(request.Headers)
			}
			expiries[id] = expirations
		}
	}
	// Remove the expired requests from the pending pool
	for id := range expiries {
		delete(pendPool, id)
	}
	return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			log.Trace("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if headers[i].Hash() != header.ParentHash {
				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
				accepted = false
				break
			}
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

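		// Hand the whole batch back to the task queue so another peer can retry it.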
		q.headerTaskQueue.Push(request.From, -float32(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		return nil
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
			return errInvalidReceipt
		}
		result.Receipts = receiptList[index]
		return nil
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
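// Delivered items are matched against the headers of the original request in
// order; assembly stops at the first mismatch and any headers left unfilled are
// pushed back onto the task queue for rescheduling.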
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
	results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		useful   bool
	)
	for i, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Reconstruct the next result if contents match up
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
			failure = errInvalidChain
			break
		}
		if err := reconstruct(header, i, q.resultCache[index]); err != nil {
			failure = err
			break
		}
		donePool[header.Hash()] = struct{}{}
		q.resultCache[index].Pending--
		useful = true
		accepted++

		// Clean up a successful fetch
		request.Headers[i] = nil
		delete(taskPool, header.Hash())
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers {
		if header != nil {
			taskQueue.Push(header, -float32(header.Number.Uint64()))
		}
	}
	// Wake up WaitResults
	if accepted > 0 {
		q.active.Signal()
	}
	// If none of the data was good, it's a stale delivery
	switch {
	case failure == nil || failure == errInvalidChain:
		return accepted, failure
	case useful:
		return accepted, fmt.Errorf("partial failure: %v", failure)
	default:
		return accepted, errStaleDelivery
	}
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	if q.resultOffset < offset {
		q.resultOffset = offset
	}
	q.fastSyncPivot = pivot
	q.mode = mode
}