github.com/haliliceylan/bsc@v1.1.10-0.20220501224556-eb78d644ebcb/eth/downloader/queue.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/trie"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

var (
	blockCacheMaxItems     = 8192             // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048             // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1              // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending int32 // Bit mask of the deliveries still outstanding
	pid     string

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool, pid string) *fetchResult {
	item := &fetchResult{
		Header: header,
		pid:    pid,
	}
	if !header.EmptyBody() {
		item.pending |= (1 << bodyType)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending |= (1 << receiptType)
	}
	return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
		atomic.AddInt32(&f.pending, -1)
	}
}

// AllDone checks if the item is fully done.
func (f *fetchResult) AllDone() bool {
	return atomic.LoadInt32(&f.pending) == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
		atomic.AddInt32(&f.pending, -2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := atomic.LoadInt32(&f.pending)
	return v&(1<<kind) == 0
}

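// Lifecycle sketch of the two-bit pending mask (an illustrative editorial
// note, not part of the upstream file), assuming a fast-sync result whose
// header has both a non-empty body and non-empty receipts:
//
//	r := newFetchResult(header, true, "peer-1") // pending == 0b11
//	r.SetBodyDone()                             // -1 clears bit 0: pending == 0b10
//	r.SetReceiptsDone()                         // -2 clears bit 1: pending == 0b00
//	r.AllDone()                                 // true
//
// The load-then-check guards above make repeated Set*Done calls no-ops once
// the corresponding bit is already clear, so a duplicate delivery cannot
// drive pending negative.
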
// queue represents hashes that either need fetching or are being fetched
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations

	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool),
		blockTaskQueue:   prque.New(nil),
		receiptTaskQueue: prque.New(nil),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}

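// Construction sketch (illustrative, not part of the upstream file), wiring
// the queue with the package-level cache defaults declared above:
//
//	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
//	q.Prepare(1, FastSync) // accept results from block #1 onwards in fast sync
//	defer q.Close()        // unblocks any goroutine parked in Results
//
// newQueue immediately calls Reset, so a freshly constructed queue is ready
// for scheduling without further setup.
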
// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns whether the queue is fully idle or still has some data inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New(nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}

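// Skeleton indexing sketch (illustrative editorial note): skeleton[i] is the
// *last* header of batch i, while each task is keyed by the batch's first
// block number. Assuming from = 1 and MaxHeaderFetch = 192, its value
// elsewhere in this package, the loop above produces:
//
//	headerTaskPool[1]   = skeleton[0] // fill headers 1..192, ending in skeleton[0]
//	headerTaskPool[193] = skeleton[1] // fill headers 193..384, ending in skeleton[1]
//
// DeliverHeaders later enforces exactly this shape: a filled batch must be
// MaxHeaderFetch long and its final header must hash to the pooled skeleton
// header.
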
// RetrieveHeaders retrieves the header chain assembled from the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == FastSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

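// Priority convention sketch (illustrative editorial note): prque pops the
// highest priority first, so tasks are pushed with the negated block number
// to make the lowest-numbered, most urgent header surface first. With
// hypothetical headers h99 and h100 at blocks 99 and 100:
//
//	tq := prque.New(nil)
//	tq.Push(h100, -100)
//	tq.Push(h99, -99)
//	h, _ := tq.Pop() // h99, since -99 > -100
//
// The same convention is used by every task queue in this file, including
// the requeueing paths in cancel, expire and deliver.
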
// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, in between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless of whether closed or not, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += tx.Size()
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated result size, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// Log some info at certain times
	if time.Since(q.lastStatLog) > 60*time.Second {
		q.lastStatLog = time.Now()
		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Info("Downloader queue stats", info...)
	}
	return results
}

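// Worked example of the size calibration above (illustrative numbers): with
// blockCacheSizeWeight = 0.1, a running resultSize of 90 KiB and a new block
// measuring 100 KiB update the average to 0.1*100 + 0.9*90 = 91 KiB. The
// throttle threshold then becomes roughly blockCacheMemory / resultSize,
// i.e. 64 MiB / 91 KiB, or about 721 in-flight results.
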
func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued that require processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued that require importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//   item     - the fetchRequest
//   progress - whether any progress was made
//   throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// The task queue pops items in order, so the highest priority block
		// is also the lowest block number.
		h, _ := taskQueue.Peek()
		header := h.(*types.Header)
		// We can ask the resultcache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle.
		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync, p.id)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc--
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no result slots available. Leave it in the task queue.
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us.
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// This most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no result slots available. Leave it in the task queue.
			break
		}
		if item.Done(kind) {
			// If it's a noop, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc--
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
	return request, progress, throttled
}

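// Interpreting the (request, progress, throttle) triple at a call site
// (illustrative sketch; the dispatch comments are hypothetical glue, not
// part of this file):
//
//	request, progress, throttle := q.ReserveBodies(peer, count)
//	switch {
//	case request != nil: // dispatch the fetch to the peer
//	case throttle:       // no result slots free, back off before retrying
//	case progress:       // only no-ops or stale tasks were cleared, retry
//	}
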
// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	if request.From > 0 {
		taskQueue.Push(request.From, -int64(request.From))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerID)
	}
	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerID)
	}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any unsatisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -int64(request.From))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -int64(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expiries[id] = len(request.Headers)

			// Remove the expired requests from the pending pool directly
			delete(pendPool, id)
		}
	}
	return expiries
}

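// Note on the expiry report (editorial note): for body and receipt requests
// the reported count is the number of headers that were in flight, whereas
// skeleton header requests carry only a From index and a nil Headers slice,
// so ExpireHeaders reports a count of 0 and only the peer id is meaningful.
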
// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:16])
	}
	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			logger.Trace("Last header broke skeleton structure", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := headers[0].Hash()
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set-up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		logger.Trace("Skeleton filling not accepted", "from", request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Transactions(txLists[index]), trie.NewStackTrie(nil)) != header.TxHash {
			return errInvalidBody
		}
		if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		return nil
	}

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Receipts(receiptList[index]), trie.NewStackTrie(nil)) != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, len(receiptList), validate, reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
			reconstruct(accepted, res)
		} else {
			// else: between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it does
			// it's not something to panic about.
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

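// Error semantics sketch for deliver (editorial note): a fully valid
// response returns (accepted, nil); a response where only some items
// validated returns the accepted count alongside a "partial failure" error;
// and a response where nothing validated wraps the first validation error
// with %w (so callers can match it via errors.Is) and tags it as a stale
// delivery.
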
// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}