github.com/calmw/ethereum@v0.1.1/eth/downloader/queue.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/calmw/ethereum/common"
	"github.com/calmw/ethereum/common/prque"
	"github.com/calmw/ethereum/core/types"
	"github.com/calmw/ethereum/log"
	"github.com/calmw/ethereum/metrics"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending atomic.Int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
	Withdrawals  types.Withdrawals
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending.Store(item.pending.Load() | (1 << bodyType))
	} else if header.WithdrawalsHash != nil {
		item.Withdrawals = make(types.Withdrawals, 0)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending.Store(item.pending.Load() | (1 << receiptType))
	}
	return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := f.pending.Load(); (v & (1 << bodyType)) != 0 {
		f.pending.Add(-1)
	}
}

// AllDone checks if the item is done.
func (f *fetchResult) AllDone() bool {
	return f.pending.Load() == 0
}
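// Note on the pending field above: it is used as a two-bit mask rather than a
// plain counter. Bit 0 (1 << bodyType) marks an outstanding block body and
// bit 1 (1 << receiptType) marks outstanding receipts. SetBodyDone subtracts 1
// and SetReceiptsDone below subtracts 2, each clearing its own bit exactly
// once because the setters first check that the bit is still set.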
// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := f.pending.Load(); (v & (1 << receiptType)) != 0 {
		f.pending.Add(-2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := f.pending.Load()
	return v&(1<<kind) == 0
}

// queue represents hashes that either need fetching or are being fetched.
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque[int64, uint64]    // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerHashes    []common.Hash                  // Result cache accumulating the completed header hashes
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header      // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest           // Currently pending block (body) retrieval operations
	blockWakeCh    chan bool                          // Channel to notify the block fetcher of new tasks

	receiptTaskPool  map[common.Hash]*types.Header      // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest           // Currently pending receipt retrieval operations
	receiptWakeCh    chan bool                          // Channel to notify the receipt fetcher of new tasks

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	logTime time.Time // Time instance when status was last reported
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool, 1),
		blockTaskQueue:   prque.New[int64, *types.Header](nil),
		blockWakeCh:      make(chan bool, 1),
		receiptTaskQueue: prque.New[int64, *types.Header](nil),
		receiptWakeCh:    make(chan bool, 1),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}
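// An illustrative sketch (hypothetical origin and mode values, not taken from
// this file) of how a download cycle is expected to drive the queue:
//
//	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
//	q.Prepare(origin+1, SnapSync)                        // accept results above the local head
//	// ... Schedule / Reserve* / Deliver* / Results ...
//	q.Reset(blockCacheMaxItems, blockCacheInitialItems)  // reuse the queue for the next cycle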
// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBodies retrieves the number of block body requests pending for retrieval.
func (q *queue) PendingBodies() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns whether the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}
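// An illustrative sketch of the skeleton-filling flow implemented by
// ScheduleSkeleton, ReserveHeaders, DeliverHeaders and RetrieveHeaders below
// (the peer and skeleton values are hypothetical):
//
//	q.ScheduleSkeleton(from, skeleton)            // one task per MaxHeaderFetch-wide gap
//	req := q.ReserveHeaders(peer, MaxHeaderFetch) // req.From is handed to a peer
//	// ... header batches arrive via DeliverHeaders ...
//	headers, hashes, proced := q.RetrieveHeaders()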
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New[int64, uint64](nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, []common.Hash, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, hashes, proced := q.headerResults, q.headerHashes, q.headerProced
	q.headerResults, q.headerHashes, q.headerProced = nil, nil, 0

	return headers, hashes, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for i, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := hashes[i]
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == SnapSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}
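// An illustrative example of Schedule's ordering checks above: delivering
// headers numbered 100, 101, 103 with from=100 accepts only 100 and 101,
// since 103 breaks the consecutive numbering; a mismatched ParentHash
// similarly truncates the accepted prefix at the offending header.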
// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless of whether the queue was closed or not, we can still deliver
	// whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += common.StorageSize(tx.Size())
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated result size, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// With results removed from the cache, wake throttled fetchers
	for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} {
		select {
		case ch <- true:
		default:
		}
	}
	// Log some info at certain times
	if time.Since(q.logTime) >= 60*time.Second {
		q.logTime = time.Now()

		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Debug("Downloader queue stats", info...)
	}
	return results
}

func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}
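// The itemSize reported by stats above is the exponential moving average
// maintained by Results: with blockCacheSizeWeight = 0.1, each processed batch
// moves the average 10% towards the most recently observed block size. As a
// worked example, if resultSize settles around 64 KiB, Results requests a
// throttle threshold of ceil(256 MiB / 64 KiB) = 4096 result slots (the
// resultStore may cap this further).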
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from]; ok {
				skip = append(skip, from)
				continue
			}
		}
		send = from
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}
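// An illustrative sketch (hypothetical peer and count values) of how a body
// fetcher is expected to consume the reservation API above:
//
//	request, progress, throttle := q.ReserveBodies(peer, count)
//	switch {
//	case throttle:       // result cache is full, back off for a while
//	case request == nil: // nothing fetchable for this peer right now
//	default:             // issue a body request for request.Headers
//	}
//
// A true progress flag means tasks were cleared without needing a fetch (e.g.
// empty blocks), so Results may have new items even though no network request
// was issued.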
// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//
//	item     - the fetchRequest
//	progress - whether any progress was made
//	throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque[int64, *types.Header],
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// The task queue will pop items in order, so the highest prio block
		// is also the lowest block number.
		header, _ := taskQueue.Peek()

		// We can ask the resultcache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle.
		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc = proc - 1
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no result slots available. Leave it in the task queue.
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us.
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// This most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no result slots available. Leave it in the task queue.
			break
		}
		if item.Done(kind) {
			// If it's a noop, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc = proc - 1
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
	return request, progress, throttled
}
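// A note on the priorities used throughout this file: the prque pops the
// highest priority first, so tasks are pushed with -int64(blockNumber) (and
// -int64(from) for header batches). The lowest block number therefore always
// surfaces first, keeping reservations and re-queued failures in chain order.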
// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.headerPendPool[peerID]; ok {
		q.headerTaskQueue.Push(request.From, -int64(request.From))
		delete(q.headerPendPool, peerID)
	}
	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerID)
	}
	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerID)
	}
}

// ExpireHeaders cancels a request that timed out and moves the pending fetch
// task back into the queue for rescheduling.
func (q *queue) ExpireHeaders(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	headerTimeoutMeter.Mark(1)
	return q.expire(peer, q.headerPendPool, q.headerTaskQueue)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	bodyTimeoutMeter.Mark(1)
	return q.expire(peer, q.blockPendPool, q.blockTaskQueue)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	receiptTimeoutMeter.Mark(1)
	return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue)
}

// expire is the generic check that moves a specific expired task from a pending
// pool back into a task pool. The type of the passed taskQueue is a bit awkward,
// as we would need a generic expire method to handle both queue types, but that
// is not supported at the moment (Go 1.19 at least).
//
// Note, this method expects the queue lock to be already held. The reason the
// lock is not obtained in here is that the parameters already need to access
// the queue, so they already need a lock anyway.
func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue interface{}) int {
	// Retrieve the request being expired and log an error if it's non-existent,
	// as there's no order of events that should lead to such expirations.
	req := pendPool[peer]
	if req == nil {
		log.Error("Expired request does not exist", "peer", peer)
		return 0
	}
	delete(pendPool, peer)

	// Return any non-satisfied requests to the pool
	if req.From > 0 {
		taskQueue.(*prque.Prque[int64, uint64]).Push(req.From, -int64(req.From))
	}
	for _, header := range req.Headers {
		taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64()))
	}
	return len(req.Headers)
}
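// Because expire cannot be written generically over both queue types here, it
// takes the concrete priority queue as an interface{} and type-asserts it:
// header requests are re-queued by their From offset, while body and receipt
// requests re-queue every header they contained. The return value is the
// number of headers put back (zero for header-batch expirations, whose From
// offset is re-queued instead).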
// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []common.Hash, headerProcCh chan *headerTask) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:16])
	}
	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		headerDropMeter.Mark(int64(len(headers)))
		return 0, errNoFetchesPending
	}
	delete(q.headerPendPool, id)

	headerReqTimer.UpdateSince(request.Time)
	headerInMeter.Mark(int64(len(headers)))

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", hashes[0], "expected", request.From)
			accepted = false
		} else if hashes[len(headers)-1] != target {
			logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", hashes[len(headers)-1], "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := hashes[0]
		for i, header := range headers[1:] {
			hash := hashes[i+1]
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set-up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		logger.Trace("Skeleton filling not accepted", "from", request.From)
		headerDropMeter.Mark(int64(len(headers)))

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	copy(q.headerHashes[request.From-q.headerOffset:], hashes)

	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		processHeaders := make([]*types.Header, ready)
		copy(processHeaders, q.headerResults[q.headerProced:q.headerProced+ready])

		processHashes := make([]common.Hash, ready)
		copy(processHashes, q.headerHashes[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- &headerTask{
			headers: processHeaders,
			hashes:  processHashes,
		}:
			logger.Trace("Pre-scheduled new headers", "count", len(processHeaders), "from", processHeaders[0].Number)
			q.headerProced += len(processHeaders)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}
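// Acceptance in DeliverHeaders is all-or-nothing: a filling batch must be
// exactly MaxHeaderFetch headers long, start at request.From, hash-link
// internally, and end in the scheduled skeleton header; anything else marks
// the batch as unavailable from that peer and re-queues the task for another.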
// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
	uncleLists [][]*types.Header, uncleListHashes []common.Hash,
	withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if txListHashes[index] != header.TxHash {
			return errInvalidBody
		}
		if uncleListHashes[index] != header.UncleHash {
			return errInvalidBody
		}
		if header.WithdrawalsHash == nil {
			// nil hash means that withdrawals should not be present in body
			if withdrawalLists[index] != nil {
				return errInvalidBody
			}
		} else { // non-nil hash: body must have withdrawals
			if withdrawalLists[index] == nil {
				return errInvalidBody
			}
			if withdrawalListHashes[index] != *header.WithdrawalsHash {
				return errInvalidBody
			}
		}
		return nil
	}

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.Withdrawals = withdrawalLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, receiptListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if receiptListHashes[index] != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
}
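// Both delivery entry points above expect the caller to have precomputed the
// per-item hashes (presumably via types.DeriveSha and types.CalcUncleHash in
// the network handlers), so validation reduces to comparing them against the
// header fields before the generic deliver below fills the result slots.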
// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest,
	reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {
	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		resDropMeter.Mark(int64(results))
		return 0, errNoFetchesPending
	}
	delete(pendPool, id)

	reqTimer.UpdateSince(request.Time)
	resInMeter.Mark(int64(results))

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil && !stale {
			reconstruct(accepted, res)
		} else {
			// else: between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it does it's
			// not something to panic about
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	resDropMeter.Mark(int64(results - accepted))

	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}