github.com/carter-ya/go-ethereum@v0.0.0-20230628080049-d2309be3983b/eth/downloader/queue.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending |= (1 << bodyType)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending |= (1 << receiptType)
	}
	return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
		atomic.AddInt32(&f.pending, -1)
	}
}

// AllDone checks if the item is done.
func (f *fetchResult) AllDone() bool {
	return atomic.LoadInt32(&f.pending) == 0
}
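
// Example (illustrative): the pending field above is a bitmask keyed by
// bodyType (bit 0) and receiptType (bit 1), so a flag is cleared by
// subtracting the bit's value. For a non-empty block during snap sync the
// lifecycle looks roughly like this:
//
//	r := newFetchResult(header, true) // pending = 1<<bodyType | 1<<receiptType = 3
//	r.SetBodyDone()                   // subtracts 1 -> pending = 2
//	r.SetReceiptsDone()               // subtracts 2 -> pending = 0
//	r.AllDone()                       // true, both parts delivered
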
// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
		atomic.AddInt32(&f.pending, -2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := atomic.LoadInt32(&f.pending)
	return v&(1<<kind) == 0
}

// queue represents hashes that either need fetching or are being fetched.
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerHashes    []common.Hash                  // Result cache accumulating the completed header hashes
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations
	blockWakeCh    chan bool                     // Channel to notify the block fetcher of new tasks

	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations
	receiptWakeCh    chan bool                     // Channel to notify the receipt fetcher of new tasks

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool, 1),
		blockTaskQueue:   prque.New(nil),
		blockWakeCh:      make(chan bool, 1),
		receiptTaskQueue: prque.New(nil),
		receiptWakeCh:    make(chan bool, 1),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}
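
// Example (illustrative): a rough sketch of how the downloader is expected to
// drive the queue, based only on the methods defined in this file. The concrete
// call sites live in the downloader and its fetcher loops, so the exact wiring
// may differ.
//
//	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
//	q.Prepare(origin+1, SnapSync)          // set the result offset and sync mode
//	q.Schedule(headers, hashes, from)      // enqueue body/receipt tasks for new headers
//	req, _, _ := q.ReserveBodies(p, count) // hand a batch of tasks to an idle peer
//	q.DeliverBodies(p.id, txs, txHashes, uncles, uncleHashes)
//	results := q.Results(true)             // block until completed items are available
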
// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBodies retrieves the number of block body requests pending for retrieval.
func (q *queue) PendingBodies() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns if the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New(nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, []common.Hash, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, hashes, proced := q.headerResults, q.headerHashes, q.headerProced
	q.headerResults, q.headerHashes, q.headerProced = nil, nil, 0

	return headers, hashes, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for i, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := hashes[i]
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == SnapSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}
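
// Note on task ordering (illustrative): tasks are pushed with a priority of
// -blockNumber (see the Push calls above and below), so the priority queues
// always pop the lowest pending block number first. For example, a header for
// block 100 is pushed with priority -100 and one for block 200 with priority
// -200; since -100 is the higher priority, block 100 is reserved and fetched
// first.
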
// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule, but assumes
// that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, in between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless if closed or not, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += tx.Size()
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated result size, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// With results removed from the cache, wake throttled fetchers
	for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} {
		select {
		case ch <- true:
		default:
		}
	}
	// Log some info at certain times
	if time.Since(q.lastStatLog) > 60*time.Second {
		q.lastStatLog = time.Now()
		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Info("Downloader queue stats", info...)
	}
	return results
}

func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}
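
// Example (illustrative): the throttle threshold computed in Results is
// roughly blockCacheMemory / resultSize, rounded up. With the default 256 MiB
// budget and an exponential moving average block size of, say, 64 KiB, that
// allows about 256*1024*1024 / (64*1024) = 4096 in-flight result slots;
// SetThrottleThreshold may clamp this further (e.g. to the result store's
// own capacity).
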
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//
//	item     - the fetchRequest
//	progress - whether any progress was made
//	throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// the task queue will pop items in order, so the highest prio block
		// is also the lowest block number.
		h, _ := taskQueue.Peek()
		header := h.(*types.Header)
		// we can ask the resultcache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle

		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc = proc - 1
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no resultslots available. Leave it in the task queue
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// this most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no resultslots available. Leave it in the task queue
			break
		}
		if item.Done(kind) {
			// If it's a noop, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc = proc - 1
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
	return request, progress, throttled
}
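
// Example (illustrative): interpreting the reserveHeaders return values. A nil
// request with throttle == true means the result cache has no free slots for
// this peer right now; a nil request with progress == true means only stale or
// already-completed tasks were popped (the queue advanced, but there is
// nothing left to fetch); a non-nil request carries the reserved headers in
// send order for the peer.
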
// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.headerPendPool[peerID]; ok {
		q.headerTaskQueue.Push(request.From, -int64(request.From))
		delete(q.headerPendPool, peerID)
	}
	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerID)
	}
	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerID)
	}
}

// ExpireHeaders cancels a request that timed out and moves the pending fetch
// task back into the queue for rescheduling.
func (q *queue) ExpireHeaders(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	headerTimeoutMeter.Mark(1)
	return q.expire(peer, q.headerPendPool, q.headerTaskQueue)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	bodyTimeoutMeter.Mark(1)
	return q.expire(peer, q.blockPendPool, q.blockTaskQueue)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	receiptTimeoutMeter.Mark(1)
	return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue)
}

// expire is the generic check that moves a specific expired task from a pending
// pool back into a task pool.
//
// Note, this method expects the queue lock to be already held. The reason the
// lock is not obtained in here is that the parameters already need to access
// the queue, so they already need a lock anyway.
func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) int {
	// Retrieve the request being expired and log an error if it's non-existent,
	// as there's no order of events that should lead to such expirations.
	req := pendPool[peer]
	if req == nil {
		log.Error("Expired request does not exist", "peer", peer)
		return 0
	}
	delete(pendPool, peer)

	// Return any non-satisfied requests to the pool
	if req.From > 0 {
		taskQueue.Push(req.From, -int64(req.From))
	}
	for _, header := range req.Headers {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	return len(req.Headers)
}
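
// Example (illustrative): a skeleton fill delivered for request.From is copied
// into headerResults at offset request.From - headerOffset (see DeliverHeaders
// below). If the skeleton was scheduled from block 1001 (headerOffset = 1001)
// and MaxHeaderFetch is 192, a batch delivered for From = 1193 lands at
// indexes 192..383 of the result cache, and headerProced only advances once
// every earlier batch has arrived as well.
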
// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []common.Hash, headerProcCh chan *headerTask) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:16])
	}
	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		headerDropMeter.Mark(int64(len(headers)))
		return 0, errNoFetchesPending
	}
	delete(q.headerPendPool, id)

	headerReqTimer.UpdateSince(request.Time)
	headerInMeter.Mark(int64(len(headers)))

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", hashes[0], "expected", request.From)
			accepted = false
		} else if hashes[len(headers)-1] != target {
			logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", hashes[len(headers)-1], "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := hashes[0]
		for i, header := range headers[1:] {
			hash := hashes[i+1]
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set-up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		logger.Trace("Skeleton filling not accepted", "from", request.From)
		headerDropMeter.Mark(int64(len(headers)))

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	copy(q.headerHashes[request.From-q.headerOffset:], hashes)

	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		processHeaders := make([]*types.Header, ready)
		copy(processHeaders, q.headerResults[q.headerProced:q.headerProced+ready])

		processHashes := make([]common.Hash, ready)
		copy(processHashes, q.headerHashes[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- &headerTask{
			headers: processHeaders,
			hashes:  processHashes,
		}:
			logger.Trace("Pre-scheduled new headers", "count", len(processHeaders), "from", processHeaders[0].Number)
			q.headerProced += len(processHeaders)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash, uncleLists [][]*types.Header, uncleListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if txListHashes[index] != header.TxHash {
			return errInvalidBody
		}
		if uncleListHashes[index] != header.UncleHash {
			return errInvalidBody
		}
		return nil
	}

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, receiptListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if receiptListHashes[index] != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
}
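
// Note (illustrative): the validate closures above only compare caller-supplied
// hashes against the corresponding header fields (TxHash, UncleHash,
// ReceiptHash). The delivering code is therefore assumed to have already
// derived those roots from the raw response data before calling
// DeliverBodies/DeliverReceipts.
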
// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque, pendPool map[string]*fetchRequest,
	reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {
	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		resDropMeter.Mark(int64(results))
		return 0, errNoFetchesPending
	}
	delete(pendPool, id)

	reqTimer.UpdateSince(request.Time)
	resInMeter.Mark(int64(results))

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil && !stale {
			reconstruct(accepted, res)
		} else {
			// else: between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it does
			// it's not something to panic about.
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	resDropMeter.Mark(int64(results - accepted))

	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}
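
// Example (illustrative): the import pipeline is expected to consume the queue
// with a loop along these lines, relying on Results to block while the cache
// is empty and to return whatever is left once Close is called. The
// importBlock call shown here is a hypothetical stand-in for the real
// block/receipt import step.
//
//	for {
//		results := q.Results(true)
//		if len(results) == 0 {
//			return // queue closed and drained
//		}
//		for _, result := range results {
//			importBlock(result.Header, result.Transactions, result.Uncles, result.Receipts)
//		}
//	}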