github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/qct/downloader/queue.go

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/quickchainproject/quickchain/common"
	"github.com/quickchainproject/quickchain/core/types"
	"github.com/quickchainproject/quickchain/log"
	"github.com/quickchainproject/quickchain/metrics"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	blockCacheItems      = 8192             // Maximum number of blocks to cache before throttling the download
	blockCacheMemory     = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight = 0.1              // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	Pending int         // Number of data fetches still pending
	Hash    common.Hash // Hash of the header to prevent recalculating

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

// queue represents hashes that need fetching or are being fetched.
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // [eth/62] Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
	headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
	headerProced    int                            // [eth/62] Number of headers already processed from the results
	headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
	headerContCh    chan bool                      // [eth/62] Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // [eth/62] Currently pending block (body) retrieval operations
	blockDonePool  map[common.Hash]struct{}      // [eth/62] Set of the completed block (body) fetches

	receiptTaskPool  map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // [eth/63] Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // [eth/63] Currently pending receipt retrieval operations
	receiptDonePool  map[common.Hash]struct{}      // [eth/63] Set of the completed receipt fetches

	resultCache  []*fetchResult     // Downloaded but not yet delivered fetch results
	resultOffset uint64             // Offset of the first cached fetch result in the block chain
	resultSize   common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.Mutex
	active *sync.Cond
	closed bool
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
	lock := new(sync.Mutex)
	return &queue{
		headerPendPool:   make(map[string]*fetchRequest),
		headerContCh:     make(chan bool),
		blockTaskPool:    make(map[common.Hash]*types.Header),
		blockTaskQueue:   prque.New(),
		blockPendPool:    make(map[string]*fetchRequest),
		blockDonePool:    make(map[common.Hash]struct{}),
		receiptTaskPool:  make(map[common.Hash]*types.Header),
		receiptTaskQueue: prque.New(),
		receiptPendPool:  make(map[string]*fetchRequest),
		receiptDonePool:  make(map[common.Hash]struct{}),
		resultCache:      make([]*fetchResult, blockCacheItems),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
}

// Reset clears out the queue contents.
func (q *queue) Reset() {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)
	q.blockDonePool = make(map[common.Hash]struct{})

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)
	q.receiptDonePool = make(map[common.Hash]struct{})

	q.resultCache = make([]*fetchResult, blockCacheItems)
	q.resultOffset = 0
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.lock.Unlock()
	q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns whether the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)
	cached := len(q.blockDonePool) + len(q.receiptDonePool)

	return (queued + pending + cached) == 0
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.resultSlots(q.blockPendPool, q.blockDonePool) <= 0
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.resultSlots(q.receiptPendPool, q.receiptDonePool) <= 0
}

// resultSlots calculates the number of result slots available for requests
// while adhering to both the item limit and the memory limit of the results
// cache.
func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}) int {
	// Calculate the maximum length capped by the memory limit
	limit := len(q.resultCache)
	if common.StorageSize(len(q.resultCache))*q.resultSize > common.StorageSize(blockCacheMemory) {
		limit = int((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	}
	// Calculate the number of slots already finished
	finished := 0
	for _, result := range q.resultCache[:limit] {
		if result == nil {
			break
		}
		if _, ok := donePool[result.Hash]; ok {
			finished++
		}
	}
	// Calculate the number of slots currently downloading
	pending := 0
	for _, request := range pendPool {
		for _, header := range request.Headers {
			if header.Number.Uint64() < q.resultOffset+uint64(limit) {
				pending++
			}
		}
	}
	// Return the free slots to distribute
	return limit - finished - pending
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
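//
// As a worked illustration (assuming MaxHeaderFetch is 192, its value in
// upstream go-ethereum): with from=1, skeleton[0] is expected to be header
// #192 and the task at index 1 covers filling headers 1-192; skeleton[1]
// would be header #384, covering 193-384, and so on.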
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New()
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -float32(index))
	}
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
			continue
		}
		if _, ok := q.receiptTaskPool[hash]; ok {
			log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			continue
		}
		// Queue the header for content retrieval
		q.blockTaskPool[hash] = header
		q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))

		if q.mode == FastSync {
			q.receiptTaskPool[hash] = header
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
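//
// A minimal consumer sketch (hypothetical caller; blocking mode):
//
//	for {
//		results := q.Results(true) // blocks until results are ready or Close is called
//		if len(results) == 0 {
//			return // queue closed and drained
//		}
//		// import results[i].Header, .Transactions, .Uncles, .Receipts here
//	}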
func (q *queue) Results(block bool) []*fetchResult {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Count the number of items available for processing
	nproc := q.countProcessableItems()
	for nproc == 0 && !q.closed {
		if !block {
			return nil
		}
		q.active.Wait()
		nproc = q.countProcessableItems()
	}
	// Since we have a batch limit, don't pull more into "dangling" memory
	if nproc > maxResultsProcess {
		nproc = maxResultsProcess
	}
	results := make([]*fetchResult, nproc)
	copy(results, q.resultCache[:nproc])
	if len(results) > 0 {
		// Mark results as done before dropping them from the cache.
		for _, result := range results {
			hash := result.Header.Hash()
			delete(q.blockDonePool, hash)
			delete(q.receiptDonePool, hash)
		}
		// Delete the results from the cache and clear the tail.
		copy(q.resultCache, q.resultCache[nproc:])
		for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
			q.resultCache[i] = nil
		}
		// Advance the expected block number of the first cache entry.
		q.resultOffset += uint64(nproc)

		// Recalculate the result item weights to prevent memory exhaustion
		for _, result := range results {
			size := result.Header.Size()
			for _, uncle := range result.Uncles {
				size += uncle.Size()
			}
			for _, receipt := range result.Receipts {
				size += receipt.Size()
			}
			for _, tx := range result.Transactions {
				size += tx.Size()
			}
			q.resultSize = common.StorageSize(blockCacheSizeWeight)*size + (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
		}
	}
	return results
}

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
	for i, result := range q.resultCache {
		if result == nil || result.Pending > 0 {
			return i
		}
	}
	return len(q.resultCache)
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -float32(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued requiring processing.
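//
// A reservation is later resolved by exactly one of delivery, cancellation or
// expiry. A sketch of the intended call pattern (illustrative only):
//
//	if req, _, err := q.ReserveBodies(p, count); err == nil && req != nil {
//		// send a body request for req.Headers to the peer, then either:
//		//   q.DeliverBodies(p.id, txLists, uncleLists) on a response,
//		//   q.CancelBodies(req) on a local abort, or
//		//   let q.ExpireBodies(timeout) reclaim it on a timeout.
//	}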
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
	isNoop := func(header *types.Header) bool {
		return header.ReceiptHash == types.EmptyRootHash
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, nil
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, nil
	}
	// Calculate an upper limit on the items we might fetch (i.e. throttling)
	space := q.resultSlots(pendPool, donePool)

	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)

	progress := false
	for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
		header := taskQueue.PopItem().(*types.Header)
		hash := header.Hash()

		// If we're the first to request this task, initialise the result container
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 {
			common.Report("index allocation went beyond available resultCache space")
			return nil, false, errInvalidChain
		}
		if q.resultCache[index] == nil {
			components := 1
			if q.mode == FastSync {
				components = 2
			}
			q.resultCache[index] = &fetchResult{
				Pending: components,
				Hash:    hash,
				Header:  header,
			}
		}
		// If this fetch task is a noop, skip this fetch operation
		if isNoop(header) {
			donePool[hash] = struct{}{}
			delete(taskPool, hash)

			space, proc = space-1, proc-1
			q.resultCache[index].Pending--
			progress = true
			continue
		}
		// Otherwise, unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(hash) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	if progress {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, nil
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request

	return request, progress, nil
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request.From > 0 {
		taskQueue.Push(request.From, -float32(request.From))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
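//
// Note that, as the code below shows, only block body and receipt requests are
// revoked here; an in-flight header request from the dropped peer is left for
// CancelHeaders or ExpireHeaders to return to the task queue.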
func (q *queue) Revoke(peerId string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerId)
	}
	if request, ok := q.receiptPendPool[peerId]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerId)
	}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any unsatisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -float32(request.From))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -float32(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expiries[id] = len(request.Headers)
		}
	}
	// Remove the expired requests from the pending pool
	for id := range expiries {
		delete(pendPool, id)
	}
	return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
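//
// Concretely, a batch is only accepted if it contains exactly MaxHeaderFetch
// headers, the first header carries the requested start number, the last
// header hashes to the matching skeleton header, and every header's
// ParentHash links it to its predecessor in the batch.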
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			log.Trace("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if headers[i].Hash() != header.ParentHash {
				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
				accepted = false
				break
			}
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -float32(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
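//
// Each delivered body is verified against its owning header before being
// accepted: the transactions must hash (via DeriveSha) to header.TxHash and
// the uncle list must hash (via CalcUncleHash) to header.UncleHash.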
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		return nil
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
			return errInvalidReceipt
		}
		result.Receipts = receiptList[index]
		return nil
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
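//
// Result slots are addressed by block number relative to the cache window: a
// header maps to resultCache[header.Number-resultOffset]. As an illustration
// (made-up numbers), with resultOffset=1000 a delivery for header #1003 lands
// in resultCache[3], while an index outside the window (or an unreserved
// slot) fails with errInvalidChain.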
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
	results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		useful   bool
	)
	for i, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Reconstruct the next result if contents match up
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
			failure = errInvalidChain
			break
		}
		if err := reconstruct(header, i, q.resultCache[index]); err != nil {
			failure = err
			break
		}
		hash := header.Hash()

		donePool[hash] = struct{}{}
		q.resultCache[index].Pending--
		useful = true
		accepted++

		// Clean up a successful fetch
		request.Headers[i] = nil
		delete(taskPool, hash)
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers {
		if header != nil {
			taskQueue.Push(header, -float32(header.Number.Uint64()))
		}
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	// If none of the data was good, it's a stale delivery
	switch {
	case failure == nil || failure == errInvalidChain:
		return accepted, failure
	case useful:
		return accepted, fmt.Errorf("partial failure: %v", failure)
	default:
		return accepted, errStaleDelivery
	}
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	if q.resultOffset < offset {
		q.resultOffset = offset
	}
	q.mode = mode
}
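// A bird's-eye sketch of how a hypothetical sync driver would exercise this
// queue (illustrative only; the actual downloader wires these calls together
// across several goroutines):
//
//	q := newQueue()
//	q.Prepare(localHeight+1, FastSync)
//	q.ScheduleSkeleton(from, skeleton) // fill headers against the skeleton
//	q.Schedule(headers, from)          // then schedule body/receipt fetches
//	// per-peer reserve/deliver loops run concurrently; Results feeds the importer
//	q.Close()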