github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/eth/downloader/queue.go

// Copyright 2018 The go-ethereum Authors
// Copyright 2019 The go-aigar Authors
// This file is part of the go-aigar library.
//
// The go-aigar library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-aigar library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
    "errors"
    "fmt"
    "sync"
    "time"

    "github.com/AigarNetwork/aigar/common"
    "github.com/AigarNetwork/aigar/common/prque"
    "github.com/AigarNetwork/aigar/core/types"
    "github.com/AigarNetwork/aigar/log"
    "github.com/AigarNetwork/aigar/metrics"
)

var (
    blockCacheItems      = 8192             // Maximum number of blocks to cache before throttling the download
    blockCacheMemory     = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
    blockCacheSizeWeight = 0.1              // Multiplier to approximate the average block size based on past ones
)

var (
    errNoFetchesPending = errors.New("no fetches pending")
    errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
    Peer    *peerConnection // Peer to which the request was sent
    From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
    Headers []*types.Header // [eth/62] Requested headers, sorted by request order
    Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
    Pending int         // Number of data fetches still pending
    Hash    common.Hash // Hash of the header to prevent recalculating

    Header       *types.Header
    Uncles       []*types.Header
    Transactions types.Transactions
    Receipts     types.Receipts
}

// queue represents hashes that either need fetching or are being fetched
type queue struct {
    mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

    // Headers are "special", they download in batches, supported by a skeleton chain
    headerHead      common.Hash                    // [eth/62] Hash of the last queued header to verify order
    headerTaskPool  map[uint64]*types.Header       // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
    headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
    headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
    headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
    headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
    headerProced    int                            // [eth/62] Number of headers already processed from the results
    headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
    headerContCh    chan bool                      // [eth/62] Channel to notify when header download finishes

    // All data retrievals below are based on an already assembled header chain
    blockTaskPool  map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
    blockTaskQueue *prque.Prque                  // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
    blockPendPool  map[string]*fetchRequest      // [eth/62] Currently pending block (body) retrieval operations
    blockDonePool  map[common.Hash]struct{}      // [eth/62] Set of the completed block (body) fetches

    receiptTaskPool  map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
    receiptTaskQueue *prque.Prque                  // [eth/63] Priority queue of the headers to fetch the receipts for
    receiptPendPool  map[string]*fetchRequest      // [eth/63] Currently pending receipt retrieval operations
    receiptDonePool  map[common.Hash]struct{}      // [eth/63] Set of the completed receipt fetches

    resultCache  []*fetchResult     // Downloaded but not yet delivered fetch results
    resultOffset uint64             // Offset of the first cached fetch result in the block chain
    resultSize   common.StorageSize // Approximate size of a block (exponential moving average)

    lock   *sync.Mutex
    active *sync.Cond
    closed bool
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
    lock := new(sync.Mutex)
    return &queue{
        headerPendPool:   make(map[string]*fetchRequest),
        headerContCh:     make(chan bool),
        blockTaskPool:    make(map[common.Hash]*types.Header),
        blockTaskQueue:   prque.New(nil),
        blockPendPool:    make(map[string]*fetchRequest),
        blockDonePool:    make(map[common.Hash]struct{}),
        receiptTaskPool:  make(map[common.Hash]*types.Header),
        receiptTaskQueue: prque.New(nil),
        receiptPendPool:  make(map[string]*fetchRequest),
        receiptDonePool:  make(map[common.Hash]struct{}),
        resultCache:      make([]*fetchResult, blockCacheItems),
        active:           sync.NewCond(lock),
        lock:             lock,
    }
}
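
// Illustrative lifecycle sketch added for readability (not part of the original
// source): a rough outline of how the downloader typically drives a queue
// instance, using only functions defined in this file. The variables origin,
// headers, p, txs and uncles are placeholders.
//
//    q := newQueue()
//    q.Prepare(origin, FastSync)                   // first expected block number and sync mode
//    q.Schedule(headers, origin)                   // enqueue body (and receipt) fetch tasks
//    request, _, _ := q.ReserveBodies(p, 128)      // hand a batch of tasks to an idle peer
//    q.DeliverBodies(request.Peer.id, txs, uncles) // feed the response into the result cache
//    blocks := q.Results(true)                     // drain completed fetch results for import
//    q.Close()                                     // unblock Results once the sync terminates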

// Reset clears out the queue contents.
func (q *queue) Reset() {
    q.lock.Lock()
    defer q.lock.Unlock()

    q.closed = false
    q.mode = FullSync

    q.headerHead = common.Hash{}
    q.headerPendPool = make(map[string]*fetchRequest)

    q.blockTaskPool = make(map[common.Hash]*types.Header)
    q.blockTaskQueue.Reset()
    q.blockPendPool = make(map[string]*fetchRequest)
    q.blockDonePool = make(map[common.Hash]struct{})

    q.receiptTaskPool = make(map[common.Hash]*types.Header)
    q.receiptTaskQueue.Reset()
    q.receiptPendPool = make(map[string]*fetchRequest)
    q.receiptDonePool = make(map[common.Hash]struct{})

    q.resultCache = make([]*fetchResult, blockCacheItems)
    q.resultOffset = 0
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
    q.lock.Lock()
    q.closed = true
    q.lock.Unlock()
    q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return len(q.receiptPendPool) > 0
}

// Idle returns whether the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
    pending := len(q.blockPendPool) + len(q.receiptPendPool)
    cached := len(q.blockDonePool) + len(q.receiptDonePool)

    return (queued + pending + cached) == 0
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.resultSlots(q.blockPendPool, q.blockDonePool) <= 0
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.resultSlots(q.receiptPendPool, q.receiptDonePool) <= 0
}
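
// Illustrative note added for readability (not part of the original source):
// the throttles above are driven by resultSlots below, which caps the usable
// result cache both by item count and by memory. For example, with
// blockCacheMemory = 64 MiB and a moving-average resultSize of 32 KiB, the
// memory cap allows roughly 64*1024*1024 / (32*1024) = 2048 slots, well below
// the 8192-item blockCacheItems limit, so memory becomes the effective bound
// once blocks grow large.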

// resultSlots calculates the number of results slots available for requests
// whilst adhering to both the item limit and the memory limit of the results
// cache.
func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}) int {
    // Calculate the maximum length capped by the memory limit
    limit := len(q.resultCache)
    if common.StorageSize(len(q.resultCache))*q.resultSize > common.StorageSize(blockCacheMemory) {
        limit = int((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
    }
    // Calculate the number of slots already finished
    finished := 0
    for _, result := range q.resultCache[:limit] {
        if result == nil {
            break
        }
        if _, ok := donePool[result.Hash]; ok {
            finished++
        }
    }
    // Calculate the number of slots currently downloading
    pending := 0
    for _, request := range pendPool {
        for _, header := range request.Headers {
            if header.Number.Uint64() < q.resultOffset+uint64(limit) {
                pending++
            }
        }
    }
    // Return the free slots to distribute
    return limit - finished - pending
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
    q.lock.Lock()
    defer q.lock.Unlock()

    // No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
    if q.headerResults != nil {
        panic("skeleton assembly already in progress")
    }
    // Schedule all the header retrieval tasks for the skeleton assembly
    q.headerTaskPool = make(map[uint64]*types.Header)
    q.headerTaskQueue = prque.New(nil)
    q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
    q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
    q.headerProced = 0
    q.headerOffset = from
    q.headerContCh = make(chan bool, 1)

    for i, header := range skeleton {
        index := from + uint64(i*MaxHeaderFetch)

        q.headerTaskPool[index] = header
        q.headerTaskQueue.Push(index, -int64(index))
    }
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
    q.lock.Lock()
    defer q.lock.Unlock()

    headers, proced := q.headerResults, q.headerProced
    q.headerResults, q.headerProced = nil, 0

    return headers, proced
}
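
// Illustrative note added for readability (not part of the original source):
// ScheduleSkeleton slices the header range into MaxHeaderFetch-sized batches.
// With, say, from = 1000 and MaxHeaderFetch = 192 (the concrete values are
// examples only; MaxHeaderFetch is defined elsewhere in this package), skeleton
// entry i is keyed by start index 1000 + i*192 in headerTaskPool, and a filled
// batch is later copied into headerResults at offset (start - headerOffset) by
// DeliverHeaders.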

// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Insert all the headers prioritised by the contained block number
    inserts := make([]*types.Header, 0, len(headers))
    for _, header := range headers {
        // Make sure chain order is honoured and preserved throughout
        hash := header.Hash()
        if header.Number == nil || header.Number.Uint64() != from {
            log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
            break
        }
        if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
            log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
            break
        }
        // Make sure no duplicate requests are executed
        if _, ok := q.blockTaskPool[hash]; ok {
            log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
            continue
        }
        if _, ok := q.receiptTaskPool[hash]; ok {
            log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
            continue
        }
        // Queue the header for content retrieval
        q.blockTaskPool[hash] = header
        q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))

        if q.mode == FastSync {
            q.receiptTaskPool[hash] = header
            q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
        }
        inserts = append(inserts, header)
        q.headerHead = hash
        from++
    }
    return inserts
}

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
func (q *queue) Results(block bool) []*fetchResult {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Count the number of items available for processing
    nproc := q.countProcessableItems()
    for nproc == 0 && !q.closed {
        if !block {
            return nil
        }
        q.active.Wait()
        nproc = q.countProcessableItems()
    }
    // Since we have a batch limit, don't pull more into "dangling" memory
    if nproc > maxResultsProcess {
        nproc = maxResultsProcess
    }
    results := make([]*fetchResult, nproc)
    copy(results, q.resultCache[:nproc])
    if len(results) > 0 {
        // Mark results as done before dropping them from the cache.
        for _, result := range results {
            hash := result.Header.Hash()
            delete(q.blockDonePool, hash)
            delete(q.receiptDonePool, hash)
        }
        // Delete the results from the cache and clear the tail.
        copy(q.resultCache, q.resultCache[nproc:])
        for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
            q.resultCache[i] = nil
        }
        // Advance the expected block number of the first cache entry.
        q.resultOffset += uint64(nproc)

        // Recalculate the result item weights to prevent memory exhaustion
        for _, result := range results {
            size := result.Header.Size()
            for _, uncle := range result.Uncles {
                size += uncle.Size()
            }
            for _, receipt := range result.Receipts {
                size += receipt.Size()
            }
            for _, tx := range result.Transactions {
                size += tx.Size()
            }
            q.resultSize = common.StorageSize(blockCacheSizeWeight)*size + (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
        }
    }
    return results
}
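
// Illustrative note added for readability (not part of the original source):
// the resultSize update in Results is an exponential moving average weighted by
// blockCacheSizeWeight = 0.1. If the running average is 20 KiB and a freshly
// delivered block measures 100 KiB, the new estimate is 0.1*100 + 0.9*20 = 28
// KiB, so a single oversized block nudges the throttling threshold rather than
// dominating it.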

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
    for i, result := range q.resultCache {
        if result == nil || result.Pending > 0 {
            return i
        }
    }
    return len(q.resultCache)
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Short circuit if the peer's already downloading something (sanity check to
    // not corrupt state)
    if _, ok := q.headerPendPool[p.id]; ok {
        return nil
    }
    // Retrieve a batch of hashes, skipping previously failed ones
    send, skip := uint64(0), []uint64{}
    for send == 0 && !q.headerTaskQueue.Empty() {
        from, _ := q.headerTaskQueue.Pop()
        if q.headerPeerMiss[p.id] != nil {
            if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
                skip = append(skip, from.(uint64))
                continue
            }
        }
        send = from.(uint64)
    }
    // Merge all the skipped batches back
    for _, from := range skip {
        q.headerTaskQueue.Push(from, -int64(from))
    }
    // Assemble and return the block download request
    if send == 0 {
        return nil
    }
    request := &fetchRequest{
        Peer: p,
        From: send,
        Time: time.Now(),
    }
    q.headerPendPool[p.id] = request
    return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
    isNoop := func(header *types.Header) bool {
        return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
    }
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
    isNoop := func(header *types.Header) bool {
        return header.ReceiptHash == types.EmptyRootHash
    }
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
}
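
// Illustrative note added for readability (not part of the original source):
// the isNoop closures above detect blocks whose requested data is provably
// empty from the header alone: a TxHash equal to the empty trie root and an
// UncleHash equal to the empty list hash mean there are no transactions or
// uncles (respectively, an empty ReceiptHash means no receipts), so no network
// request is needed and reserveHeaders completes the result slot immediately.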

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
    pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
    // Short circuit if the pool has been depleted, or if the peer's already
    // downloading something (sanity check not to corrupt state)
    if taskQueue.Empty() {
        return nil, false, nil
    }
    if _, ok := pendPool[p.id]; ok {
        return nil, false, nil
    }
    // Calculate an upper limit on the items we might fetch (i.e. throttling)
    space := q.resultSlots(pendPool, donePool)

    // Retrieve a batch of tasks, skipping previously failed ones
    send := make([]*types.Header, 0, count)
    skip := make([]*types.Header, 0)

    progress := false
    for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
        header := taskQueue.PopItem().(*types.Header)
        hash := header.Hash()

        // If we're the first to request this task, initialise the result container
        index := int(header.Number.Int64() - int64(q.resultOffset))
        if index >= len(q.resultCache) || index < 0 {
            common.Report("index allocation went beyond available resultCache space")
            return nil, false, errInvalidChain
        }
        if q.resultCache[index] == nil {
            components := 1
            if q.mode == FastSync {
                components = 2
            }
            q.resultCache[index] = &fetchResult{
                Pending: components,
                Hash:    hash,
                Header:  header,
            }
        }
        // If this fetch task is a noop, skip this fetch operation
        if isNoop(header) {
            donePool[hash] = struct{}{}
            delete(taskPool, hash)

            space, proc = space-1, proc-1
            q.resultCache[index].Pending--
            progress = true
            continue
        }
        // Otherwise unless the peer is known not to have the data, add to the retrieve list
        if p.Lacks(hash) {
            skip = append(skip, header)
        } else {
            send = append(send, header)
        }
    }
    // Merge all the skipped headers back
    for _, header := range skip {
        taskQueue.Push(header, -int64(header.Number.Uint64()))
    }
    if progress {
        // Wake Results, resultCache was modified
        q.active.Signal()
    }
    // Assemble and return the block download request
    if len(send) == 0 {
        return nil, progress, nil
    }
    request := &fetchRequest{
        Peer:    p,
        Headers: send,
        Time:    time.Now(),
    }
    pendPool[p.id] = request

    return request, progress, nil
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
    q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
    q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
    q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
    q.lock.Lock()
    defer q.lock.Unlock()

    if request.From > 0 {
        taskQueue.Push(request.From, -int64(request.From))
    }
    for _, header := range request.Headers {
        taskQueue.Push(header, -int64(header.Number.Uint64()))
    }
    delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
    q.lock.Lock()
    defer q.lock.Unlock()

    if request, ok := q.blockPendPool[peerID]; ok {
        for _, header := range request.Headers {
            q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
        }
        delete(q.blockPendPool, peerID)
    }
    if request, ok := q.receiptPendPool[peerID]; ok {
        for _, header := range request.Headers {
            q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
        }
        delete(q.receiptPendPool, peerID)
    }
}

// ExpireHeaders checks for in-flight header requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in-flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in-flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
    // Iterate over the expired requests and return each to the queue
    expiries := make(map[string]int)
    for id, request := range pendPool {
        if time.Since(request.Time) > timeout {
            // Update the metrics with the timeout
            timeoutMeter.Mark(1)

            // Return any non-satisfied requests to the pool
            if request.From > 0 {
                taskQueue.Push(request.From, -int64(request.From))
            }
            for _, header := range request.Headers {
                taskQueue.Push(header, -int64(header.Number.Uint64()))
            }
            // Add the peer to the expiry report along with the number of failed requests
            expiries[id] = len(request.Headers)

            // Remove the expired requests from the pending pool directly
            delete(pendPool, id)
        }
    }
    return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Short circuit if the data was never requested
    request := q.headerPendPool[id]
    if request == nil {
        return 0, errNoFetchesPending
    }
    headerReqTimer.UpdateSince(request.Time)
    delete(q.headerPendPool, id)

    // Ensure headers can be mapped onto the skeleton chain
    target := q.headerTaskPool[request.From].Hash()

    accepted := len(headers) == MaxHeaderFetch
    if accepted {
        if headers[0].Number.Uint64() != request.From {
            log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
            accepted = false
        } else if headers[len(headers)-1].Hash() != target {
            log.Trace("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
            accepted = false
        }
    }
    if accepted {
        for i, header := range headers[1:] {
            hash := header.Hash()
            if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
                log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
                accepted = false
                break
            }
            if headers[i].Hash() != header.ParentHash {
                log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
                accepted = false
                break
            }
        }
    }
    // If the batch of headers wasn't accepted, mark as unavailable
    if !accepted {
        log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)

        miss := q.headerPeerMiss[id]
        if miss == nil {
            q.headerPeerMiss[id] = make(map[uint64]struct{})
            miss = q.headerPeerMiss[id]
        }
        miss[request.From] = struct{}{}

        q.headerTaskQueue.Push(request.From, -int64(request.From))
        return 0, errors.New("delivery not accepted")
    }
    // Clean up a successful fetch and try to deliver any sub-results
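    // Added explanatory comment (not in the original source): request.From is
    // the absolute chain index of this batch's first header, so subtracting
    // q.headerOffset (the index of headerResults[0]) gives the slot at which
    // the delivered headers are copied below.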
    copy(q.headerResults[request.From-q.headerOffset:], headers)
    delete(q.headerTaskPool, request.From)

    ready := 0
    for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
        ready += MaxHeaderFetch
    }
    if ready > 0 {
        // Headers are ready for delivery, gather them and push forward (non blocking)
        process := make([]*types.Header, ready)
        copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

        select {
        case headerProcCh <- process:
            log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
            q.headerProced += len(process)
        default:
        }
    }
    // Check for termination and return
    if len(q.headerTaskPool) == 0 {
        q.headerContCh <- false
    }
    return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

    reconstruct := func(header *types.Header, index int, result *fetchResult) error {
        if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
            return errInvalidBody
        }
        result.Transactions = txLists[index]
        result.Uncles = uncleLists[index]
        return nil
    }
    return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

    reconstruct := func(header *types.Header, index int, result *fetchResult) error {
        if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
            return errInvalidReceipt
        }
        result.Receipts = receiptList[index]
        return nil
    }
    return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
    pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
    results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {

    // Short circuit if the data was never requested
    request := pendPool[id]
    if request == nil {
        return 0, errNoFetchesPending
    }
    reqTimer.UpdateSince(request.Time)
    delete(pendPool, id)

    // If no data items were retrieved, mark them as unavailable for the origin peer
    if results == 0 {
        for _, header := range request.Headers {
            request.Peer.MarkLacking(header.Hash())
        }
    }
    // Assemble each of the results with their headers and retrieved data parts
    var (
        accepted int
        failure  error
        useful   bool
    )
    for i, header := range request.Headers {
        // Short circuit assembly if no more fetch results are found
        if i >= results {
            break
        }
        // Reconstruct the next result if contents match up
        index := int(header.Number.Int64() - int64(q.resultOffset))
        if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
            failure = errInvalidChain
            break
        }
        if err := reconstruct(header, i, q.resultCache[index]); err != nil {
            failure = err
            break
        }
        hash := header.Hash()

        donePool[hash] = struct{}{}
        q.resultCache[index].Pending--
        useful = true
        accepted++

        // Clean up a successful fetch
        request.Headers[i] = nil
        delete(taskPool, hash)
    }
    // Return all failed or missing fetches to the queue
    for _, header := range request.Headers {
        if header != nil {
            taskQueue.Push(header, -int64(header.Number.Uint64()))
        }
    }
    // Wake up Results
    if accepted > 0 {
        q.active.Signal()
    }
    // If none of the data was good, it's a stale delivery
    switch {
    case failure == nil || failure == errInvalidChain:
        return accepted, failure
    case useful:
        return accepted, fmt.Errorf("partial failure: %v", failure)
    default:
        return accepted, errStaleDelivery
    }
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Prepare the queue for sync results
    if q.resultOffset < offset {
        q.resultOffset = offset
    }
    q.mode = mode
}