github.com/core-coin/go-core/v2@v2.1.9/xcb/downloader/queue.go

// Copyright 2015 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered, and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/common/prque"
	"github.com/core-coin/go-core/v2/core/types"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/metrics"
	"github.com/core-coin/go-core/v2/trie"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

var (
	blockCacheMaxItems     = 8192             // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048             // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1              // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // [xcb/62] Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // [xcb/62] Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending |= (1 << bodyType)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending |= (1 << receiptType)
	}
	return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
		atomic.AddInt32(&f.pending, -1)
	}
}

// AllDone checks if item is done.
func (f *fetchResult) AllDone() bool {
	return atomic.LoadInt32(&f.pending) == 0
}
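// Note: pending is used as a two-bit mask, with bit 0 (bodyType) and bit 1
// (receiptType) set by newFetchResult and cleared again by the Set*Done
// methods via AddInt32(-1) and AddInt32(-2) respectively. A purely
// illustrative walk-through for a fast-sync result of a non-empty block:
//
//	pending == (1 << bodyType) | (1 << receiptType) // == 3 after newFetchResult
//	SetBodyDone()                                   // pending == 2
//	SetReceiptsDone()                               // pending == 0, AllDone() reports true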
// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
		atomic.AddInt32(&f.pending, -2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := atomic.LoadInt32(&f.pending)
	return v&(1<<kind) == 0
}

// queue represents hashes that either need fetching or are being fetched.
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // [xcb/62] Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // [xcb/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // [xcb/62] Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // [xcb/62] Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // [xcb/62] Currently pending header retrieval operations
	headerResults   []*types.Header                // [xcb/62] Result cache accumulating the completed headers
	headerProced    int                            // [xcb/62] Number of headers already processed from the results
	headerOffset    uint64                         // [xcb/62] Number of the first header in the result cache
	headerContCh    chan bool                      // [xcb/62] Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // [xcb/62] Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // [xcb/62] Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // [xcb/62] Currently pending block (body) retrieval operations

	receiptTaskPool  map[common.Hash]*types.Header // [xcb/63] Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // [xcb/63] Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // [xcb/63] Currently pending receipt retrieval operations

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool),
		blockTaskQueue:   prque.New(nil),
		receiptTaskQueue: prque.New(nil),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}
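// A rough lifecycle sketch of how the surrounding downloader is expected to
// drive a queue (the peer p, the headers slice and the numeric arguments are
// illustrative placeholders, not values taken from this file):
//
//	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
//	q.Prepare(origin, FastSync)          // first block to expect and sync mode
//	q.Schedule(headers, origin)          // queue body (and receipt) fetches
//	req, _, _ := q.ReserveBodies(p, 128) // assign a batch to an idle peer
//	// ... fetch req.Headers from the peer over the network, then:
//	q.DeliverBodies(p.id, txLists, uncleLists)
//	results := q.Results(true)           // completed blocks, in chain order
//	q.Close()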
// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns whether the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}
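// For orientation, the skeleton scheduling below works on fixed-size windows.
// As a purely illustrative example: with from = 1 and a skeleton of three
// headers, the scheduled task indexes are
//
//	1, 1+MaxHeaderFetch, 1+2*MaxHeaderFetch
//
// and each task asks a peer for MaxHeaderFetch consecutive headers starting at
// that index, the last of which must hash to the corresponding skeleton header
// (see DeliverHeaders).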
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New(nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == FastSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}
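// As a usage sketch for Schedule (q and hs are assumed to exist elsewhere):
// if hs holds contiguous headers numbered 100..163 and q.headerHead matches
// the parent of hs[0], then q.Schedule(hs, 100) schedules and returns all 64
// headers; if a header breaks the numbering or the ancestry, only the prefix
// before the break is scheduled and returned.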
// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, in between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless of whether the queue was closed or not, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += tx.Size()
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated result size, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// Log some info at certain times
	if time.Since(q.lastStatLog) > 60*time.Second {
		q.lastStatLog = time.Now()
		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Info("Downloader queue stats", info...)
	}
	return results
}

func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}
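// To give a feel for the throttling math in Results above (numbers purely
// illustrative): with blockCacheMemory = 64 MiB and a calibrated resultSize of
// 32 KiB, the requested throttle threshold works out to
//
//	(64*1024*1024 + 32*1024 - 1) / (32*1024) ≈ 2048
//
// which SetThrottleThreshold may further cap based on the result cache's own
// limits.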
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}
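// Callers typically act on the (request, progress, throttle) triple roughly as
// follows (an illustrative sketch, not code from this package's callers):
//
//	req, progress, throttle := q.ReserveBodies(p, count)
//	switch {
//	case req != nil: // ask the peer for the bodies of req.Headers
//	case throttle:   // the result cache is full, back off for a while
//	case progress:   // only stale/no-op tasks were consumed, retry soon
//	}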
// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//
//	item     - the fetchRequest
//	progress - whether any progress was made
//	throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// The task queue will pop items in order, so the highest priority block
		// is also the lowest block number.
		h, _ := taskQueue.Peek()
		header := h.(*types.Header)
		// We can ask the resultcache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle.
		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc = proc - 1
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no result slots available. Leave it in the task queue.
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us.
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// This most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no result slots available. Leave it in the task queue.
			break
		}
		if item.Done(kind) {
			// If it's a noop, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc = proc - 1
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
	return request, progress, throttled
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}
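// The Cancel* variants above and below all funnel into cancel: a header-fill
// request is identified by its non-zero From index, which is returned to the
// skeleton task queue, whereas body and receipt requests have their individual
// headers re-queued; in both cases the peer's pending slot is cleared.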
// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	if request.From > 0 {
		taskQueue.Push(request.From, -int64(request.From))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerID)
	}
	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerID)
	}
}

// ExpireHeaders checks for in-flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in-flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in-flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}
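// Note that the map returned by the Expire* helpers is keyed by peer id and
// valued with the number of re-queued headers; for expired header fills this
// count is zero, since only the request.From index is pushed back, so callers
// should not rely on it to gauge the size of a lost header batch.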
// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any unsatisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -int64(request.From))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -int64(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expiries[id] = len(request.Headers)

			// Remove the expired requests from the pending pool directly
			delete(pendPool, id)
		}
	}
	return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			log.Trace("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := headers[0].Hash()
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set-up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
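	// The accepted batch is written into headerResults at offset
	// request.From - q.headerOffset; because every task starts at a multiple
	// of MaxHeaderFetch above headerOffset, each batch fills exactly one
	// MaxHeaderFetch-sized window, which is also why 'ready' below can advance
	// in MaxHeaderFetch steps.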
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non-blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Transactions(txLists[index]), trie.NewStackTrie(nil)) != header.TxHash {
			return errInvalidBody
		}
		if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		return nil
	}

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Receipts(receiptList[index]), trie.NewStackTrie(nil)) != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, len(receiptList), validate, reconstruct)
}
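// Both delivery paths above match results positionally: element i of the
// delivered lists is validated against request.Headers[i] of the peer's
// outstanding request, so a peer returning bodies or receipts out of request
// order will fail validation and have the remainder of its batch re-queued.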
// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
			reconstruct(accepted, res)
		} else {
			// else: between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it does it's
			// not something to panic about
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if errors.Is(failure, errInvalidChain) {
		return accepted, failure
	}
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}