github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/eth/downloader/queue.go

// Copyright 2021 The adkgo Authors
// This file is part of the adkgo library (adapted for adkgo from go-ethereum v1.10.8).
//
// the adkgo library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// the adkgo library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/aidoskuneen/adk-node/common"
	"github.com/aidoskuneen/adk-node/common/prque"
	"github.com/aidoskuneen/adk-node/core/types"
	"github.com/aidoskuneen/adk-node/log"
	"github.com/aidoskuneen/adk-node/metrics"
	"github.com/aidoskuneen/adk-node/trie"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)
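
// Illustrative note (not part of the upstream code): the two indices above
// double as bit positions in fetchResult.pending, so a body fetch maps to
// bit 0 (1<<bodyType == 1) and a receipt fetch to bit 1 (1<<receiptType == 2).
// A fast-sync block with both parts outstanding therefore evolves as:
//
//	pending := int32(1<<bodyType | 1<<receiptType) // 0b11 == 3
//	pending -= 1 << bodyType                       // body delivered   -> 0b10
//	pending -= 1 << receiptType                    // receipts arrived -> 0b00, AllDone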

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending |= (1 << bodyType)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending |= (1 << receiptType)
	}
	return item
}
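
// Usage sketch (illustrative only): for a fast-sync header with a non-empty
// body and non-empty receipts, both pending bits start set, so the result
// only completes once both parts have been delivered:
//
//	res := newFetchResult(header, true) // fastSync == true
//	res.SetBodyDone()                   // clears bit 0
//	res.SetReceiptsDone()               // clears bit 1
//	done := res.AllDone()               // true only after both deliveries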

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
		atomic.AddInt32(&f.pending, -1)
	}
}

// AllDone checks if item is done.
func (f *fetchResult) AllDone() bool {
	return atomic.LoadInt32(&f.pending) == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
		atomic.AddInt32(&f.pending, -2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := atomic.LoadInt32(&f.pending)
	return v&(1<<kind) == 0
}

// queue represents hashes that either need fetching or are being fetched
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations

	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool),
		blockTaskQueue:   prque.New(nil),
		receiptTaskQueue: prque.New(nil),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}
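
// Minimal wiring sketch (assumed from this package's defaults; the actual
// call site lives in the downloader, not in this file; `origin` is a
// hypothetical sync-origin height):
//
//	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
//	q.Prepare(origin+1, FastSync) // accept results above the sync origin
//	// ... and a full teardown once the sync ends:
//	q.Close()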

// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns if the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New(nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}
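
// Worked example (illustrative, assuming MaxHeaderFetch == 192): for from == 1
// and two skeleton headers, the tasks are keyed by their batch start index and
// each skeleton header is the *last* header of its batch, which DeliverHeaders
// later uses as the acceptance target:
//
//	q.headerTaskPool[1]   = skeleton[0] // header #192, ends batch [1, 192]
//	q.headerTaskPool[193] = skeleton[1] // header #384, ends batch [193, 384]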

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == FastSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}
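
// Call sketch (assumed shapes; the real caller lives in the downloader): after
// skeleton filling yields a contiguous run of headers starting at number from,
// they are handed over here, and any header breaking ordering or ancestry
// drops the remainder of the batch:
//
//	inserts := q.Schedule(headers, from)
//	if len(inserts) != len(headers) {
//		// the tail violated chain order/ancestry and was not scheduled
//	}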

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, in between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless of whether it's closed or not, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += tx.Size()
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated resultsize, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)
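	// Worked example (illustrative): with blockCacheMemory == 256 MiB and a
	// calibrated resultSize of ~64 KiB, the ceiling division above allows
	// roughly 4096 in-flight results; SetThrottleThreshold may cap that
	// further, hence the reassignment of throttleThreshold.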

	// Log some info at certain times
	if time.Since(q.lastStatLog) > 60*time.Second {
		q.lastStatLog = time.Now()
		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Info("Downloader queue stats", info...)
	}
	return results
}

func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//   item     - the fetchRequest
//   progress - whether any progress was made
//   throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// The task queue will pop items in order, so the highest prio block
		// is also the lowest block number.
		h, _ := taskQueue.Peek()
		header := h.(*types.Header)
		// We can ask the result cache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle.

		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc = proc - 1
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no result slots available. Leave it in the task queue.
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us.
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// This most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no result slots available. Leave it in the task queue.
			break
		}
		if item.Done(kind) {
			// If it's a noop, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc = proc - 1
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
	return request, progress, throttled
}
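
// Consumption sketch (assumed, based on the return values documented above;
// the real caller lives elsewhere in the downloader):
//
//	req, progress, throttle := q.ReserveBodies(p, count)
//	switch {
//	case req != nil: // dispatch the body fetch to the peer
//	case throttle:   // result cache is full, back off for a while
//	case progress:   // only no-op tasks were consumed, retry immediately
//	}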

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	if request.From > 0 {
		taskQueue.Push(request.From, -int64(request.From))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerID)
	}
	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerID)
	}
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any non satisfied requests to the pool
			if request.From > 0 {
				taskQueue.Push(request.From, -int64(request.From))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -int64(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expiries[id] = len(request.Headers)

			// Remove the expired requests from the pending pool directly
			delete(pendPool, id)
		}
	}
	return expiries
}
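
// Hypothetical caller-side sketch (the actual penalisation logic lives in the
// downloader, not in this file): each returned peer id is charged with the
// number of requests it failed to satisfy in time:
//
//	for id, failed := range q.ExpireBodies(timeout) {
//		log.Debug("Body fetch timed out", "peer", id, "requests", failed)
//		// drop or deprioritise the peer here
//	}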

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:16])
	}
	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			logger.Trace("Last header broke skeleton structure", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := headers[0].Hash()
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set-up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		logger.Trace("Skeleton filling not accepted", "from", request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}
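
// Accounting note (illustrative, assuming MaxHeaderFetch == 192): batches can
// complete out of order, leaving holes in headerResults. If the batches at
// offsets 0 and 384 have been filled but 192 is still missing, the ready-scan
// above stops at the hole, so only the first 192 headers are forwarded to
// headerProcCh; the rest follow in a later delivery once the gap is filled.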

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	trieHasher := trie.NewStackTrie(nil)
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash {
			return errInvalidBody
		}
		if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		return nil
	}

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, len(txLists), validate, reconstruct)
}
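
// Validation sketch (illustrative): a delivered body is only matched to its
// header when both derived hashes line up, i.e. for index i:
//
//	types.DeriveSha(types.Transactions(txLists[i]), trie.NewStackTrie(nil)) == header.TxHash
//	types.CalcUncleHash(uncleLists[i]) == header.UncleHash
//
// The first mismatch aborts assembly and surfaces errInvalidBody; everything
// already validated is still accepted as a partial delivery.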

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	trieHasher := trie.NewStackTrie(nil)
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, len(receiptList), validate, reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
			reconstruct(accepted, res)
		} else {
			// Between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it
			// does it's not something to panic about.
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}