github.1485827954.workers.dev/ethereum/go-ethereum@v1.14.3/eth/downloader/queue.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)
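
// The two kinds above double as bit positions in fetchResult.pending: bit 0
// (1 << bodyType) marks an outstanding body download and bit 1
// (1 << receiptType) an outstanding receipt download. A minimal illustration
// (not part of this file):
//
//	var pending atomic.Int32
//	pending.Store(1<<bodyType | 1<<receiptType) // both parts outstanding: 0b11
//	pending.Add(-(1 << bodyType))               // body delivered, receipts remain: 0b10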

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending atomic.Int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
	Withdrawals  types.Withdrawals
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending.Store(item.pending.Load() | (1 << bodyType))
	} else if header.WithdrawalsHash != nil {
		item.Withdrawals = make(types.Withdrawals, 0)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending.Store(item.pending.Load() | (1 << receiptType))
	}
	return item
}

// body returns a representation of the fetch result as a types.Body object.
func (f *fetchResult) body() types.Body {
	return types.Body{
		Transactions: f.Transactions,
		Uncles:       f.Uncles,
		Withdrawals:  f.Withdrawals,
	}
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := f.pending.Load(); (v & (1 << bodyType)) != 0 {
		f.pending.Add(-1)
	}
}

// AllDone checks if the item is done.
func (f *fetchResult) AllDone() bool {
	return f.pending.Load() == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := f.pending.Load(); (v & (1 << receiptType)) != 0 {
		f.pending.Add(-2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := f.pending.Load()
	return v&(1<<kind) == 0
}
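
// Taken together, the methods above give a fetchResult the following rough
// lifecycle (a sketch, assuming snap sync and a header with a non-empty body
// and receipts):
//
//	res := newFetchResult(header, true) // pending = 1<<bodyType | 1<<receiptType
//	res.Done(bodyType)                  // false: body still outstanding
//	res.SetBodyDone()                   // clears the body bit
//	res.SetReceiptsDone()               // clears the receipt bit
//	res.AllDone()                       // true once pending reaches zero
//
// SetBodyDone and SetReceiptsDone test the bit before decrementing, so a
// repeated call for an already-completed part is a no-op; the check and the
// decrement are not one atomic step, but each part is only delivered once.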

// queue represents hashes that either need fetching or are being fetched.
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque[int64, uint64]    // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerHashes    []common.Hash                  // Result cache accumulating the completed header hashes
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header      // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest           // Currently pending block (body) retrieval operations
	blockWakeCh    chan bool                          // Channel to notify the block fetcher of new tasks

	receiptTaskPool  map[common.Hash]*types.Header      // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest           // Currently pending receipt retrieval operations
	receiptWakeCh    chan bool                          // Channel to notify the receipt fetcher of new tasks

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	logTime time.Time // Time instance when status was last reported
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool, 1),
		blockTaskQueue:   prque.New[int64, *types.Header](nil),
		blockWakeCh:      make(chan bool, 1),
		receiptTaskQueue: prque.New[int64, *types.Header](nil),
		receiptWakeCh:    make(chan bool, 1),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}
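
// A rough usage sketch from the downloader's point of view (peer management
// and the concurrent fetchers are omitted; variable names are hypothetical):
//
//	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
//	q.Prepare(origin+1, SnapSync)         // accept results from this offset on
//	q.Schedule(headers, hashes, origin+1) // enqueue body/receipt tasks
//	for {
//		results := q.Results(true) // block until a batch of results completes
//		if len(results) == 0 {
//			break // queue was closed
//		}
//		// import the results into the local chain ...
//	}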

// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBodies retrieves the number of block body requests pending for retrieval.
func (q *queue) PendingBodies() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle reports whether the queue is fully idle or still has some data inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New[int64, uint64](nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}
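
// To make the index arithmetic above concrete: with from = 1 and
// MaxHeaderFetch = 192, the batch [1, 192] is keyed by its start index 1, the
// batch [193, 384] by 193, and so on, with each skeleton header being the
// expected last header of its batch (see DeliverHeaders). Since the priority
// queue pops the highest priority first, pushing with priority -int64(index)
// makes the lowest-numbered batch pop first.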

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, []common.Hash, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, hashes, proced := q.headerResults, q.headerHashes, q.headerProced
	q.headerResults, q.headerHashes, q.headerProced = nil, nil, 0

	return headers, hashes, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for i, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := hashes[i]
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == SnapSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}
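
// The 'from' argument is the block number expected of headers[0] and must
// advance by one per accepted header. A hypothetical caller sketch:
//
//	inserted := q.Schedule(headers, hashes, 1001)
//	if len(inserted) < len(headers) {
//		// a numbering gap or broken ancestry was hit and scheduling
//		// stopped at the first offending header
//	}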

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule, but assumes
// that there are no two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless of the closed state, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += common.StorageSize(tx.Size())
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated resultSize, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// With results removed from the cache, wake throttled fetchers
	for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} {
		select {
		case ch <- true:
		default:
		}
	}
	// Log some info at certain times
	if time.Since(q.logTime) >= 60*time.Second {
		q.logTime = time.Now()

		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Debug("Downloader queue stats", info...)
	}
	return results
}
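
// The resultSize update above is an exponential moving average: with
// blockCacheSizeWeight = 0.1, every delivered block nudges the estimate by 10%
// towards the latest observed size. A worked example with illustrative numbers
// only: a previous estimate of 40 KiB and a new block of 80 KiB yield
// 0.1*80 + 0.9*40 = 44 KiB, so the throttle threshold becomes roughly
// blockCacheMemory / 44 KiB ≈ 5958 slots (further capped by the result store).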

// Stats retrieves a snapshot of the queue statistics for logging purposes.
func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

// stats assembles the queue statistics; it assumes the lock is already held.
func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from]; ok {
				skip = append(skip, from)
				continue
			}
		}
		send = from
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag indicating whether empty blocks were queued that require processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag indicating whether empty receipts were queued that require importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//
//	item     - the fetchRequest
//	progress - whether any progress was made
//	throttle - whether the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque[int64, *types.Header],
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// The task queue will pop items in order, so the highest priority block
		// is also the lowest block number.
		header, _ := taskQueue.Peek()

		// We can ask the resultCache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle.
		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc = proc - 1
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no result slots available. Leave it in the task queue.
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us.
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// this most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no result slots available. Leave it in the task queue.
			break
		}
		if item.Done(kind) {
			// If it's a noop, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc = proc - 1
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
	return request, progress, throttled
}
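
// A note on the returned triple: a nil request with progress == true means the
// call only cleared out completed or stale tasks, so the caller may usefully
// retry at once, while throttle == true means the result cache has no free
// slots and the fetcher should back off until a wake-up arrives on the
// corresponding wake channel (blockWakeCh or receiptWakeCh).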

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.headerPendPool[peerID]; ok {
		q.headerTaskQueue.Push(request.From, -int64(request.From))
		delete(q.headerPendPool, peerID)
	}
	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerID)
	}
	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerID)
	}
}

// ExpireHeaders cancels a request that timed out and moves the pending fetch
// task back into the queue for rescheduling.
func (q *queue) ExpireHeaders(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	headerTimeoutMeter.Mark(1)
	return q.expire(peer, q.headerPendPool, q.headerTaskQueue)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the number of fetches to reschedule.
func (q *queue) ExpireBodies(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	bodyTimeoutMeter.Mark(1)
	return q.expire(peer, q.blockPendPool, q.blockTaskQueue)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the number of fetches to reschedule.
func (q *queue) ExpireReceipts(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	receiptTimeoutMeter.Mark(1)
	return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue)
}

// expire is the generic check that moves a specific expired task from a pending
// pool back into a task pool. The type of the passed taskQueue is a bit awkward,
// as we would need a generic expire method to handle both queue types, which is
// not supported at the moment (Go 1.19) at least.
//
// Note, this method expects the queue lock to be already held. The reason the
// lock is not obtained in here is that the parameters already need to access
// the queue, so they already need a lock anyway.
func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue interface{}) int {
	// Retrieve the request being expired and log an error if it's non-existent,
	// as there's no order of events that should lead to such expirations.
	req := pendPool[peer]
	if req == nil {
		log.Error("Expired request does not exist", "peer", peer)
		return 0
	}
	delete(pendPool, peer)

	// Return any non-satisfied requests to the pool
	if req.From > 0 {
		taskQueue.(*prque.Prque[int64, uint64]).Push(req.From, -int64(req.From))
	}
	for _, header := range req.Headers {
		taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64()))
	}
	return len(req.Headers)
}
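
// The two type assertions above are safe because only header requests carry a
// non-zero From (skeleton fills), and only body/receipt requests carry
// Headers. A generic alternative would look roughly like the sketch below,
// but it would still need per-type logic to derive the pushed item and its
// priority from the fetchRequest, which is why the interface{} form is kept:
//
//	func expire[V any](peer string, pendPool map[string]*fetchRequest,
//		taskQueue *prque.Prque[int64, V], items func(*fetchRequest) []V) int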

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []common.Hash, headerProcCh chan *headerTask) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:16])
	}
	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		headerDropMeter.Mark(int64(len(headers)))
		return 0, errNoFetchesPending
	}
	delete(q.headerPendPool, id)

	headerReqTimer.UpdateSince(request.Time)
	headerInMeter.Mark(int64(len(headers)))

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", hashes[0], "expected", request.From)
			accepted = false
		} else if hashes[len(headers)-1] != target {
			logger.Trace("Last header broke skeleton structure", "number", headers[len(headers)-1].Number, "hash", hashes[len(headers)-1], "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := hashes[0]
		for i, header := range headers[1:] {
			hash := hashes[i+1]
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		logger.Trace("Skeleton filling not accepted", "from", request.From)
		headerDropMeter.Mark(int64(len(headers)))

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	copy(q.headerHashes[request.From-q.headerOffset:], hashes)

	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non-blocking)
		processHeaders := make([]*types.Header, ready)
		copy(processHeaders, q.headerResults[q.headerProced:q.headerProced+ready])

		processHashes := make([]common.Hash, ready)
		copy(processHashes, q.headerHashes[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- &headerTask{
			headers: processHeaders,
			hashes:  processHashes,
		}:
			logger.Trace("Pre-scheduled new headers", "count", len(processHeaders), "from", processHeaders[0].Number)
			q.headerProced += len(processHeaders)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}
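
// On the ready-count loop above: header batches are only ever copied into the
// result cache at MaxHeaderFetch granularity, so probing the first slot of
// each batch for non-nil is enough to tell whether that whole batch has been
// filled. For example (hypothetical state), if batches one and two have been
// delivered but batch zero is still missing, headerResults[0] is nil and
// nothing is forwarded to the processor until the gap is filled.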

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
	uncleLists [][]*types.Header, uncleListHashes []common.Hash,
	withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if txListHashes[index] != header.TxHash {
			return errInvalidBody
		}
		if uncleListHashes[index] != header.UncleHash {
			return errInvalidBody
		}
		if header.WithdrawalsHash == nil {
			// nil hash means that withdrawals should not be present in body
			if withdrawalLists[index] != nil {
				return errInvalidBody
			}
		} else { // non-nil hash: body must have withdrawals
			if withdrawalLists[index] == nil {
				return errInvalidBody
			}
			if withdrawalListHashes[index] != *header.WithdrawalsHash {
				return errInvalidBody
			}
		}
		// Blocks must have a number of blobs corresponding to the header gas usage,
		// and zero before the Cancun hardfork.
		var blobs int
		for _, tx := range txLists[index] {
			// Count the number of blobs to validate against the header's blobGasUsed
			blobs += len(tx.BlobHashes())

			// Validate the data blobs individually too
			if tx.Type() == types.BlobTxType {
				if len(tx.BlobHashes()) == 0 {
					return errInvalidBody
				}
				for _, hash := range tx.BlobHashes() {
					if !kzg4844.IsValidVersionedHash(hash[:]) {
						return errInvalidBody
					}
				}
				if tx.BlobTxSidecar() != nil {
					return errInvalidBody
				}
			}
		}
		if header.BlobGasUsed != nil {
			if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
				return errInvalidBody
			}
		} else {
			if blobs != 0 {
				return errInvalidBody
			}
		}
		return nil
	}
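	// To make the blob accounting concrete: with params.BlobTxBlobGasPerBlob
	// equal to 131072 (2^17), a header with BlobGasUsed = 393216 requires the
	// body to carry exactly 3 blob hashes across its transactions. Dividing on
	// the header side is safe because the header has already been validated,
	// whereas the body is still untrusted input.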

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.Withdrawals = withdrawalLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, receiptListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if receiptListHashes[index] != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest,
	reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {
	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		resDropMeter.Mark(int64(results))
		return 0, errNoFetchesPending
	}
	delete(pendPool, id)

	reqTimer.UpdateSince(request.Time)
	resInMeter.Mark(int64(results))

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil && !stale {
			reconstruct(accepted, res)
		} else {
			// Between the validation above and this point, some other peer
			// filled this result, or it was indeed a no-op. This should not
			// happen, but if it does it's not something to panic about.
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	resDropMeter.Mark(int64(results - accepted))

	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}
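
// Note on the error shape above: %w wraps the validation failure, so a caller
// can still match the concrete cause with errors.Is. An illustrative check
// (variable names hypothetical):
//
//	if _, err := q.DeliverBodies(id, txs, txHashes, uncles, uncleHashes, wxs, wxHashes); err != nil {
//		if errors.Is(err, errInvalidBody) {
//			// the peer sent bodies that failed validation outright
//		}
//	}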

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}