github.com/ethereum/go-ethereum@v1.16.1/eth/downloader/queue.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Contains the block download scheduler to collect download tasks and schedule 18 // them in an ordered, and throttled way. 
package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

// Task type identifiers, also used as bit positions in fetchResult.pending.
const (
	bodyType    = uint(0) // Block body retrieval task
	receiptType = uint(1) // Receipt retrieval task
)

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	// pending is a bit field of outstanding deliveries: bit bodyType (value 1)
	// is set while the body is missing, bit receiptType (value 2) while the
	// receipts are missing. A value of zero means the result is complete.
	pending atomic.Int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     rlp.RawValue // Raw RLP-encoded receipt list (only populated during snap sync)
	Withdrawals  types.Withdrawals
}

// newFetchResult creates a fetch result tracking the parts still needed for
// the given header: the body if it is non-empty, and the receipts if snap
// syncing and the header declares any.
func newFetchResult(header *types.Header, snapSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	// Note: the load/or/store sequences below are not atomic as a unit, but the
	// item is not yet shared with any other goroutine at this point.
	if !header.EmptyBody() {
		item.pending.Store(item.pending.Load() | (1 << bodyType))
	} else if header.WithdrawalsHash != nil {
		// Empty body but withdrawals expected: pre-fill an empty (non-nil) list.
		item.Withdrawals = make(types.Withdrawals, 0)
	}
	if snapSync {
		if header.EmptyReceipts() {
			// Ensure the receipts list is valid even if it isn't actively fetched.
			item.Receipts = rlp.EmptyList
		} else {
			item.pending.Store(item.pending.Load() | (1 << receiptType))
		}
	}
	return item
}

// body returns a representation of the fetch result as a types.Body object.
func (f *fetchResult) body() types.Body {
	return types.Body{
		Transactions: f.Transactions,
		Uncles:       f.Uncles,
		Withdrawals:  f.Withdrawals,
	}
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	// Subtracting 1 clears the bodyType bit (bit 0), which was verified set.
	// NOTE(review): the load-then-add pair is not atomic as a unit; this
	// presumably relies on a single goroutine delivering each part — confirm.
	if v := f.pending.Load(); (v & (1 << bodyType)) != 0 {
		f.pending.Add(-1)
	}
}

// AllDone checks if item is done.
func (f *fetchResult) AllDone() bool {
	return f.pending.Load() == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	// Subtracting 2 clears the receiptType bit (bit 1), which was verified set.
	if v := f.pending.Load(); (v & (1 << receiptType)) != 0 {
		f.pending.Add(-2)
	}
}

// Done checks if the given type is done already
func (f *fetchResult) Done(kind uint) bool {
	v := f.pending.Load()
	return v&(1<<kind) == 0
}

// queue represents hashes that are either need fetching or are being fetched
type queue struct {
	mode       SyncMode    // Synchronisation mode to decide on the block parts to schedule for fetching
	headerHead common.Hash // Hash of the last queued header to verify order

	// All data retrievals below are based on an already assembles header chain
	blockTaskPool  map[common.Hash]*types.Header      // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest           // Currently pending block (body) retrieval operations
	blockWakeCh    chan bool                          // Channel to notify the block fetcher of new tasks

	receiptTaskPool  map[common.Hash]*types.Header      // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest           // Currently pending receipt retrieval operations
	receiptWakeCh    chan bool                          // Channel to notify when receipt fetcher of new tasks

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	// lock and active share the same mutex so Results can wait on the
	// condition variable while deliveries mutate state under the lock.
	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	logTime time.Time // Time instance when status was last reported
}

// newQueue creates a new download queue for scheduling block retrieval.
158 func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue { 159 lock := new(sync.RWMutex) 160 q := &queue{ 161 blockTaskQueue: prque.New[int64, *types.Header](nil), 162 blockWakeCh: make(chan bool, 1), 163 receiptTaskQueue: prque.New[int64, *types.Header](nil), 164 receiptWakeCh: make(chan bool, 1), 165 active: sync.NewCond(lock), 166 lock: lock, 167 } 168 q.Reset(blockCacheLimit, thresholdInitialSize) 169 return q 170 } 171 172 // Reset clears out the queue contents. 173 func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) { 174 q.lock.Lock() 175 defer q.lock.Unlock() 176 177 q.closed = false 178 q.mode = ethconfig.FullSync 179 q.headerHead = common.Hash{} 180 181 q.blockTaskPool = make(map[common.Hash]*types.Header) 182 q.blockTaskQueue.Reset() 183 q.blockPendPool = make(map[string]*fetchRequest) 184 185 q.receiptTaskPool = make(map[common.Hash]*types.Header) 186 q.receiptTaskQueue.Reset() 187 q.receiptPendPool = make(map[string]*fetchRequest) 188 189 q.resultCache = newResultStore(blockCacheLimit) 190 q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize)) 191 } 192 193 // Close marks the end of the sync, unblocking Results. 194 // It may be called even if the queue is already closed. 195 func (q *queue) Close() { 196 q.lock.Lock() 197 q.closed = true 198 q.active.Signal() 199 q.lock.Unlock() 200 } 201 202 // PendingBodies retrieves the number of block body requests pending for retrieval. 203 func (q *queue) PendingBodies() int { 204 q.lock.Lock() 205 defer q.lock.Unlock() 206 207 return q.blockTaskQueue.Size() 208 } 209 210 // PendingReceipts retrieves the number of block receipts pending for retrieval. 211 func (q *queue) PendingReceipts() int { 212 q.lock.Lock() 213 defer q.lock.Unlock() 214 215 return q.receiptTaskQueue.Size() 216 } 217 218 // InFlightBlocks retrieves whether there are block fetch requests currently in 219 // flight. 
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns if the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered. Headers must be contiguous, starting at block
// number 'from' and linking to the previously scheduled header; scheduling
// stops at the first header that breaks ordering or ancestry.
func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	var inserts int
	for i, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := hashes[i]
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			// Negated block number as priority: lower heights pop first
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == ethconfig.SnapSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts++
		q.headerHead = hash
		from++
	}
	return inserts
}

// Results retrieves and permanently removes a batch of fetch results from
// the cache. the result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, in-between we ask for the lock and the lock is obtained,
		// someone can have closed the queue. In that case, we should
		// return the available results and stop blocking
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless if closed or not, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion.
		// resultSize is an exponential moving average of observed block sizes.
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		size += common.StorageSize(len(result.Receipts))
		for _, tx := range result.Transactions {
			size += common.StorageSize(tx.Size())
		}
		size += common.StorageSize(result.Withdrawals.Size())
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated result size, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// With results removed from the cache, wake throttled fetchers
	for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} {
		select {
		case ch <- true:
		default: // fetcher already notified, don't block
		}
	}
	// Log some info at certain times
	if time.Since(q.logTime) >= 60*time.Second {
		q.logTime = time.Now()

		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Debug("Downloader queue stats", info...)
	}
	return results
}

// Stats returns a snapshot of the queue statistics for logging.
func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

// stats assembles the log key/value pairs; callers must hold the lock.
func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Beside the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Beside the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
400 // 401 // Returns: 402 // 403 // item - the fetchRequest 404 // progress - whether any progress was made 405 // throttle - if the caller should throttle for a while 406 func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque[int64, *types.Header], 407 pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) { 408 // Short circuit if the pool has been depleted, or if the peer's already 409 // downloading something (sanity check not to corrupt state) 410 if taskQueue.Empty() { 411 return nil, false, true 412 } 413 if _, ok := pendPool[p.id]; ok { 414 return nil, false, false 415 } 416 // Retrieve a batch of tasks, skipping previously failed ones 417 send := make([]*types.Header, 0, count) 418 skip := make([]*types.Header, 0) 419 progress := false 420 throttled := false 421 for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ { 422 // the task queue will pop items in order, so the highest prio block 423 // is also the lowest block number. 424 header, _ := taskQueue.Peek() 425 426 // we can ask the resultcache if this header is within the 427 // "prioritized" segment of blocks. If it is not, we need to throttle 428 429 stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == ethconfig.SnapSync) 430 if stale { 431 // Don't put back in the task queue, this item has already been 432 // delivered upstream 433 taskQueue.PopItem() 434 progress = true 435 delete(taskPool, header.Hash()) 436 proc = proc - 1 437 log.Error("Fetch reservation already delivered", "number", header.Number.Uint64()) 438 continue 439 } 440 if throttle { 441 // There are no resultslots available. 
Leave it in the task queue 442 // However, if there are any left as 'skipped', we should not tell 443 // the caller to throttle, since we still want some other 444 // peer to fetch those for us 445 throttled = len(skip) == 0 446 break 447 } 448 if err != nil { 449 // this most definitely should _not_ happen 450 log.Warn("Failed to reserve headers", "err", err) 451 // There are no resultslots available. Leave it in the task queue 452 break 453 } 454 if item.Done(kind) { 455 // If it's a noop, we can skip this task 456 delete(taskPool, header.Hash()) 457 taskQueue.PopItem() 458 proc = proc - 1 459 progress = true 460 continue 461 } 462 // Remove it from the task queue 463 taskQueue.PopItem() 464 // Otherwise unless the peer is known not to have the data, add to the retrieve list 465 if p.Lacks(header.Hash()) { 466 skip = append(skip, header) 467 } else { 468 send = append(send, header) 469 } 470 } 471 // Merge all the skipped headers back 472 for _, header := range skip { 473 taskQueue.Push(header, -int64(header.Number.Uint64())) 474 } 475 if q.resultCache.HasCompletedItems() { 476 // Wake Results, resultCache was modified 477 q.active.Signal() 478 } 479 // Assemble and return the block download request 480 if len(send) == 0 { 481 return nil, progress, throttled 482 } 483 request := &fetchRequest{ 484 Peer: p, 485 Headers: send, 486 Time: time.Now(), 487 } 488 pendPool[p.id] = request 489 return request, progress, throttled 490 } 491 492 // Revoke cancels all pending requests belonging to a given peer. This method is 493 // meant to be called during a peer drop to quickly reassign owned data fetches 494 // to remaining nodes. 
495 func (q *queue) Revoke(peerID string) { 496 q.lock.Lock() 497 defer q.lock.Unlock() 498 499 if request, ok := q.blockPendPool[peerID]; ok { 500 for _, header := range request.Headers { 501 q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) 502 } 503 delete(q.blockPendPool, peerID) 504 } 505 if request, ok := q.receiptPendPool[peerID]; ok { 506 for _, header := range request.Headers { 507 q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) 508 } 509 delete(q.receiptPendPool, peerID) 510 } 511 } 512 513 // ExpireBodies checks for in flight block body requests that exceeded a timeout 514 // allowance, canceling them and returning the responsible peers for penalisation. 515 func (q *queue) ExpireBodies(peer string) int { 516 q.lock.Lock() 517 defer q.lock.Unlock() 518 519 bodyTimeoutMeter.Mark(1) 520 return q.expire(peer, q.blockPendPool, q.blockTaskQueue) 521 } 522 523 // ExpireReceipts checks for in flight receipt requests that exceeded a timeout 524 // allowance, canceling them and returning the responsible peers for penalisation. 525 func (q *queue) ExpireReceipts(peer string) int { 526 q.lock.Lock() 527 defer q.lock.Unlock() 528 529 receiptTimeoutMeter.Mark(1) 530 return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue) 531 } 532 533 // expire is the generic check that moves a specific expired task from a pending 534 // pool back into a task pool. The syntax on the passed taskQueue is a bit weird 535 // as we would need a generic expire method to handle both types, but that is not 536 // supported at the moment at least (Go 1.19). 537 // 538 // Note, this method expects the queue lock to be already held. The reason the 539 // lock is not obtained in here is that the parameters already need to access 540 // the queue, so they already need a lock anyway. 
func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue interface{}) int {
	// Retrieve the request being expired and log an error if it's non-existent,
	// as there's no order of events that should lead to such expirations.
	req := pendPool[peer]
	if req == nil {
		log.Error("Expired request does not exist", "peer", peer)
		return 0
	}
	delete(pendPool, peer)

	// Return any non-satisfied requests to the pool. The queue is passed as a
	// bare interface because it may be keyed either by uint64 (skeleton fills,
	// req.From) or by header; the type assertion selects the matching shape.
	if req.From > 0 {
		taskQueue.(*prque.Prque[int64, uint64]).Push(req.From, -int64(req.From))
	}
	for _, header := range req.Headers {
		taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64()))
	}
	return len(req.Headers)
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of blocks bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
	uncleLists [][]*types.Header, uncleListHashes []common.Hash,
	withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash,
) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// validate checks the delivered body at 'index' against the scheduled header.
	validate := func(index int, header *types.Header) error {
		if txListHashes[index] != header.TxHash {
			return errInvalidBody
		}
		if uncleListHashes[index] != header.UncleHash {
			return errInvalidBody
		}
		if header.WithdrawalsHash == nil {
			// nil hash means that withdrawals should not be present in body
			if withdrawalLists[index] != nil {
				return errInvalidBody
			}
		} else { // non-nil hash: body must have withdrawals
			if withdrawalLists[index] == nil {
				return errInvalidBody
			}
			if withdrawalListHashes[index] != *header.WithdrawalsHash {
				return errInvalidBody
			}
		}
		// Blocks must have a number of blobs corresponding to the header gas usage,
		// and zero before the Cancun hardfork.
		var blobs int
		for _, tx := range txLists[index] {
			// Count the number of blobs to validate against the header's blobGasUsed
			blobs += len(tx.BlobHashes())

			// Validate the data blobs individually too
			if tx.Type() == types.BlobTxType {
				if len(tx.BlobHashes()) == 0 {
					return errInvalidBody
				}
				for _, hash := range tx.BlobHashes() {
					if !kzg4844.IsValidVersionedHash(hash[:]) {
						return errInvalidBody
					}
				}
				// Delivered bodies must not carry blob sidecars.
				if tx.BlobTxSidecar() != nil {
					return errInvalidBody
				}
			}
		}
		if header.BlobGasUsed != nil {
			if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
				return errInvalidBody
			}
		} else {
			if blobs != 0 {
				return errInvalidBody
			}
		}
		return nil
	}

	// reconstruct fills the fetch result with the validated body parts.
	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.Withdrawals = withdrawalLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList []rlp.RawValue, receiptListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// validate checks the delivered receipt list hash against the scheduled header.
	validate := func(index int, header *types.Header) error {
		if receiptListHashes[index] != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	// reconstruct stores the raw receipt RLP and marks the part complete.
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest,
	reqTimer *metrics.Timer, resInMeter, resDropMeter *metrics.Meter,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {
	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		resDropMeter.Mark(int64(results))
		return 0, errNoFetchesPending
	}
	delete(pendPool, id)

	reqTimer.UpdateSince(request.Time)
	resInMeter.Mark(int64(results))

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	// First pass: validate deliveries in order, stopping at the first mismatch.
	// 'i' ends up as the count of responses that passed validation.
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	// Second pass: store the validated parts into their result slots.
	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil && !stale {
			reconstruct(accepted, res)
		} else {
			// else: between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it does it's
			// not something to panic about
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	resDropMeter.Mark(int64(results - accepted))

	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}