github.com/theQRL/go-zond@v0.1.1/zond/downloader/queue.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Contains the block download scheduler to collect download tasks and schedule
     18  // them in an ordered and throttled way.
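         //
         // In rough terms, the downloader is expected to drive the queue as sketched
         // below (an illustrative outline only; the variable names are placeholders
         // and the real call sites live outside this file):
         //
         //	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
         //	q.Prepare(origin, mode)               // configure result offset and sync mode
         //	q.Schedule(headers, hashes, from)     // make bodies (and receipts) fetchable
         //	req, _, _ := q.ReserveBodies(peer, n) // hand tasks to an idle peer
         //	q.DeliverBodies(id, ...)              // feed a peer's response back in
         //	results := q.Results(true)            // drain completed blocks for import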
    19  
    20  package downloader
    21  
    22  import (
    23  	"errors"
    24  	"fmt"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/theQRL/go-zond/common"
    30  	"github.com/theQRL/go-zond/common/prque"
    31  	"github.com/theQRL/go-zond/core/types"
    32  	"github.com/theQRL/go-zond/log"
    33  	"github.com/theQRL/go-zond/metrics"
    34  	"github.com/theQRL/go-zond/params"
    35  )
    36  
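         // bodyType and receiptType are bit positions within fetchResult.pending:
         // bit 0 tracks an outstanding body delivery, bit 1 an outstanding receipt delivery.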
    37  const (
    38  	bodyType    = uint(0)
    39  	receiptType = uint(1)
    40  )
    41  
    42  var (
    43  	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
    44  	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
    45  	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
    46  	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
    47  )
    48  
    49  var (
    50  	errNoFetchesPending = errors.New("no fetches pending")
    51  	errStaleDelivery    = errors.New("stale delivery")
    52  )
    53  
    54  // fetchRequest is a currently running data retrieval operation.
    55  type fetchRequest struct {
    56  	Peer    *peerConnection // Peer to which the request was sent
    57  	From    uint64          // Requested chain element index (used for skeleton fills only)
    58  	Headers []*types.Header // Requested headers, sorted by request order
    59  	Time    time.Time       // Time when the request was made
    60  }
    61  
    62  // fetchResult is a struct collecting partial results from data fetchers until
    63  // all outstanding pieces complete and the result as a whole can be processed.
    64  type fetchResult struct {
    65  	pending atomic.Int32 // Flag telling what deliveries are outstanding
    66  
    67  	Header       *types.Header
    68  	Uncles       []*types.Header
    69  	Transactions types.Transactions
    70  	Receipts     types.Receipts
    71  	Withdrawals  types.Withdrawals
    72  }
    73  
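         // newFetchResult creates a result slot for the given header: the body is marked
         // as pending unless the header declares an empty body, and receipts are marked
         // as pending when fastSync is set unless the header declares empty receipts.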
    74  func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
    75  	item := &fetchResult{
    76  		Header: header,
    77  	}
    78  	if !header.EmptyBody() {
    79  		item.pending.Store(item.pending.Load() | (1 << bodyType))
    80  	} else if header.WithdrawalsHash != nil {
    81  		item.Withdrawals = make(types.Withdrawals, 0)
    82  	}
    83  	if fastSync && !header.EmptyReceipts() {
    84  		item.pending.Store(item.pending.Load() | (1 << receiptType))
    85  	}
    86  	return item
    87  }
    88  
    89  // SetBodyDone flags the body as finished.
    90  func (f *fetchResult) SetBodyDone() {
    91  	if v := f.pending.Load(); (v & (1 << bodyType)) != 0 {
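         		// Clearing bit bodyType (value 1) marks the body delivery as done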
    92  		f.pending.Add(-1)
    93  	}
    94  }
    95  
     96  // AllDone checks if the item is done.
    97  func (f *fetchResult) AllDone() bool {
    98  	return f.pending.Load() == 0
    99  }
   100  
   101  // SetReceiptsDone flags the receipts as finished.
   102  func (f *fetchResult) SetReceiptsDone() {
   103  	if v := f.pending.Load(); (v & (1 << receiptType)) != 0 {
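         		// Clearing bit receiptType (value 2) marks the receipt delivery as done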
   104  		f.pending.Add(-2)
   105  	}
   106  }
   107  
    108  // Done checks if the given type is done already.
   109  func (f *fetchResult) Done(kind uint) bool {
   110  	v := f.pending.Load()
   111  	return v&(1<<kind) == 0
   112  }
   113  
    114  // queue represents hashes that either need fetching or are being fetched
   115  type queue struct {
   116  	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
   117  
    118  	// Headers are "special"; they download in batches, supported by a skeleton chain
   119  	headerHead      common.Hash                    // Hash of the last queued header to verify order
   120  	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
   121  	headerTaskQueue *prque.Prque[int64, uint64]    // Priority queue of the skeleton indexes to fetch the filling headers for
   122  	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
   123  	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
   124  	headerResults   []*types.Header                // Result cache accumulating the completed headers
   125  	headerHashes    []common.Hash                  // Result cache accumulating the completed header hashes
   126  	headerProced    int                            // Number of headers already processed from the results
   127  	headerOffset    uint64                         // Number of the first header in the result cache
   128  	headerContCh    chan bool                      // Channel to notify when header download finishes
   129  
    130  	// All data retrievals below are based on an already assembled header chain
   131  	blockTaskPool  map[common.Hash]*types.Header      // Pending block (body) retrieval tasks, mapping hashes to headers
   132  	blockTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the blocks (bodies) for
   133  	blockPendPool  map[string]*fetchRequest           // Currently pending block (body) retrieval operations
   134  	blockWakeCh    chan bool                          // Channel to notify the block fetcher of new tasks
   135  
   136  	receiptTaskPool  map[common.Hash]*types.Header      // Pending receipt retrieval tasks, mapping hashes to headers
   137  	receiptTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the receipts for
   138  	receiptPendPool  map[string]*fetchRequest           // Currently pending receipt retrieval operations
    139  	receiptWakeCh    chan bool                          // Channel to notify the receipt fetcher of new tasks
   140  
   141  	resultCache *resultStore       // Downloaded but not yet delivered fetch results
   142  	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)
   143  
   144  	lock   *sync.RWMutex
   145  	active *sync.Cond
   146  	closed bool
   147  
   148  	logTime time.Time // Time instance when status was last reported
   149  }
   150  
   151  // newQueue creates a new download queue for scheduling block retrieval.
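         // A typical construction (a sketch; the downloader is assumed to pass the
         // package-level cache limits) is newQueue(blockCacheMaxItems, blockCacheInitialItems).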
   152  func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
   153  	lock := new(sync.RWMutex)
   154  	q := &queue{
   155  		headerContCh:     make(chan bool, 1),
   156  		blockTaskQueue:   prque.New[int64, *types.Header](nil),
   157  		blockWakeCh:      make(chan bool, 1),
   158  		receiptTaskQueue: prque.New[int64, *types.Header](nil),
   159  		receiptWakeCh:    make(chan bool, 1),
   160  		active:           sync.NewCond(lock),
   161  		lock:             lock,
   162  	}
   163  	q.Reset(blockCacheLimit, thresholdInitialSize)
   164  	return q
   165  }
   166  
   167  // Reset clears out the queue contents.
   168  func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
   169  	q.lock.Lock()
   170  	defer q.lock.Unlock()
   171  
   172  	q.closed = false
   173  	q.mode = FullSync
   174  
   175  	q.headerHead = common.Hash{}
   176  	q.headerPendPool = make(map[string]*fetchRequest)
   177  
   178  	q.blockTaskPool = make(map[common.Hash]*types.Header)
   179  	q.blockTaskQueue.Reset()
   180  	q.blockPendPool = make(map[string]*fetchRequest)
   181  
   182  	q.receiptTaskPool = make(map[common.Hash]*types.Header)
   183  	q.receiptTaskQueue.Reset()
   184  	q.receiptPendPool = make(map[string]*fetchRequest)
   185  
   186  	q.resultCache = newResultStore(blockCacheLimit)
   187  	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
   188  }
   189  
   190  // Close marks the end of the sync, unblocking Results.
   191  // It may be called even if the queue is already closed.
   192  func (q *queue) Close() {
   193  	q.lock.Lock()
   194  	q.closed = true
   195  	q.active.Signal()
   196  	q.lock.Unlock()
   197  }
   198  
   199  // PendingHeaders retrieves the number of header requests pending for retrieval.
   200  func (q *queue) PendingHeaders() int {
   201  	q.lock.Lock()
   202  	defer q.lock.Unlock()
   203  
   204  	return q.headerTaskQueue.Size()
   205  }
   206  
   207  // PendingBodies retrieves the number of block body requests pending for retrieval.
   208  func (q *queue) PendingBodies() int {
   209  	q.lock.Lock()
   210  	defer q.lock.Unlock()
   211  
   212  	return q.blockTaskQueue.Size()
   213  }
   214  
   215  // PendingReceipts retrieves the number of block receipts pending for retrieval.
   216  func (q *queue) PendingReceipts() int {
   217  	q.lock.Lock()
   218  	defer q.lock.Unlock()
   219  
   220  	return q.receiptTaskQueue.Size()
   221  }
   222  
   223  // InFlightBlocks retrieves whether there are block fetch requests currently in
   224  // flight.
   225  func (q *queue) InFlightBlocks() bool {
   226  	q.lock.Lock()
   227  	defer q.lock.Unlock()
   228  
   229  	return len(q.blockPendPool) > 0
   230  }
   231  
   232  // InFlightReceipts retrieves whether there are receipt fetch requests currently
   233  // in flight.
   234  func (q *queue) InFlightReceipts() bool {
   235  	q.lock.Lock()
   236  	defer q.lock.Unlock()
   237  
   238  	return len(q.receiptPendPool) > 0
   239  }
   240  
    241  // Idle returns whether the queue is fully idle or still has some data inside.
   242  func (q *queue) Idle() bool {
   243  	q.lock.Lock()
   244  	defer q.lock.Unlock()
   245  
   246  	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
   247  	pending := len(q.blockPendPool) + len(q.receiptPendPool)
   248  
   249  	return (queued + pending) == 0
   250  }
   251  
   252  // ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
   253  // up an already retrieved header skeleton.
   254  func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
   255  	q.lock.Lock()
   256  	defer q.lock.Unlock()
   257  
   258  	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
   259  	if q.headerResults != nil {
   260  		panic("skeleton assembly already in progress")
   261  	}
   262  	// Schedule all the header retrieval tasks for the skeleton assembly
   263  	q.headerTaskPool = make(map[uint64]*types.Header)
   264  	q.headerTaskQueue = prque.New[int64, uint64](nil)
   265  	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
   266  	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
   267  	q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch)
   268  	q.headerProced = 0
   269  	q.headerOffset = from
   270  	q.headerContCh = make(chan bool, 1)
   271  
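         	// Each skeleton header anchors a batch of MaxHeaderFetch headers; the task
         	// index is the chain number of the first header expected in that batch.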
   272  	for i, header := range skeleton {
   273  		index := from + uint64(i*MaxHeaderFetch)
   274  
   275  		q.headerTaskPool[index] = header
   276  		q.headerTaskQueue.Push(index, -int64(index))
   277  	}
   278  }
   279  
    280  // RetrieveHeaders retrieves the header chain assembled based on the scheduled
   281  // skeleton.
   282  func (q *queue) RetrieveHeaders() ([]*types.Header, []common.Hash, int) {
   283  	q.lock.Lock()
   284  	defer q.lock.Unlock()
   285  
   286  	headers, hashes, proced := q.headerResults, q.headerHashes, q.headerProced
   287  	q.headerResults, q.headerHashes, q.headerProced = nil, nil, 0
   288  
   289  	return headers, hashes, proced
   290  }
   291  
    292  // Schedule adds a set of headers to the download queue for scheduling, returning
   293  // the new headers encountered.
   294  func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) []*types.Header {
   295  	q.lock.Lock()
   296  	defer q.lock.Unlock()
   297  
   298  	// Insert all the headers prioritised by the contained block number
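         	// (Priorities are negated block numbers, so the priority queue yields the
         	// lowest-numbered headers first.)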
   299  	inserts := make([]*types.Header, 0, len(headers))
   300  	for i, header := range headers {
   301  		// Make sure chain order is honoured and preserved throughout
   302  		hash := hashes[i]
   303  		if header.Number == nil || header.Number.Uint64() != from {
   304  			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
   305  			break
   306  		}
   307  		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
   308  			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
   309  			break
   310  		}
   311  		// Make sure no duplicate requests are executed
   312  		// We cannot skip this, even if the block is empty, since this is
   313  		// what triggers the fetchResult creation.
   314  		if _, ok := q.blockTaskPool[hash]; ok {
   315  			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
   316  		} else {
   317  			q.blockTaskPool[hash] = header
   318  			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
   319  		}
   320  		// Queue for receipt retrieval
   321  		if q.mode == SnapSync && !header.EmptyReceipts() {
   322  			if _, ok := q.receiptTaskPool[hash]; ok {
   323  				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
   324  			} else {
   325  				q.receiptTaskPool[hash] = header
   326  				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
   327  			}
   328  		}
   329  		inserts = append(inserts, header)
   330  		q.headerHead = hash
   331  		from++
   332  	}
   333  	return inserts
   334  }
   335  
   336  // Results retrieves and permanently removes a batch of fetch results from
    337  // the cache. The result slice will be empty if the queue has been closed.
    338  // Results can be called concurrently with Deliver and Schedule,
    339  // but assumes that there are no two simultaneous callers to Results.
   340  func (q *queue) Results(block bool) []*fetchResult {
   341  	// Abort early if there are no items and non-blocking requested
   342  	if !block && !q.resultCache.HasCompletedItems() {
   343  		return nil
   344  	}
   345  	closed := false
   346  	for !closed && !q.resultCache.HasCompletedItems() {
   347  		// In order to wait on 'active', we need to obtain the lock.
   348  		// That may take a while, if someone is delivering at the same
   349  		// time, so after obtaining the lock, we check again if there
   350  		// are any results to fetch.
    351  		// Also, between asking for the lock and actually obtaining it, someone
    352  		// may have closed the queue. In that case, we should return the
    353  		// available results and stop blocking.
   354  		q.lock.Lock()
   355  		if q.resultCache.HasCompletedItems() || q.closed {
   356  			q.lock.Unlock()
   357  			break
   358  		}
   359  		// No items available, and not closed
   360  		q.active.Wait()
   361  		closed = q.closed
   362  		q.lock.Unlock()
   363  	}
    364  	// Regardless of whether the queue is closed or not, we can still deliver whatever we have
   365  	results := q.resultCache.GetCompleted(maxResultsProcess)
   366  	for _, result := range results {
   367  		// Recalculate the result item weights to prevent memory exhaustion
   368  		size := result.Header.Size()
   369  		for _, uncle := range result.Uncles {
   370  			size += uncle.Size()
   371  		}
   372  		for _, receipt := range result.Receipts {
   373  			size += receipt.Size()
   374  		}
   375  		for _, tx := range result.Transactions {
   376  			size += common.StorageSize(tx.Size())
   377  		}
   378  		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
   379  			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
   380  	}
    381  	// Using the newly calibrated result size, figure out the new throttle limit
   382  	// on the result cache
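         	// For example, with the 256MiB blockCacheMemory above and an average result
         	// size of 32KiB, the threshold works out to 8192 cached items (an
         	// illustrative calculation, assuming that average size).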
   383  	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
   384  	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)
   385  
   386  	// With results removed from the cache, wake throttled fetchers
   387  	for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} {
   388  		select {
   389  		case ch <- true:
   390  		default:
   391  		}
   392  	}
   393  	// Log some info at certain times
   394  	if time.Since(q.logTime) >= 60*time.Second {
   395  		q.logTime = time.Now()
   396  
   397  		info := q.Stats()
   398  		info = append(info, "throttle", throttleThreshold)
   399  		log.Debug("Downloader queue stats", info...)
   400  	}
   401  	return results
   402  }
   403  
   404  func (q *queue) Stats() []interface{} {
   405  	q.lock.RLock()
   406  	defer q.lock.RUnlock()
   407  
   408  	return q.stats()
   409  }
   410  
   411  func (q *queue) stats() []interface{} {
   412  	return []interface{}{
   413  		"receiptTasks", q.receiptTaskQueue.Size(),
   414  		"blockTasks", q.blockTaskQueue.Size(),
   415  		"itemSize", q.resultSize,
   416  	}
   417  }
   418  
   419  // ReserveHeaders reserves a set of headers for the given peer, skipping any
   420  // previously failed batches.
   421  func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
   422  	q.lock.Lock()
   423  	defer q.lock.Unlock()
   424  
   425  	// Short circuit if the peer's already downloading something (sanity check to
   426  	// not corrupt state)
   427  	if _, ok := q.headerPendPool[p.id]; ok {
   428  		return nil
   429  	}
   430  	// Retrieve a batch of hashes, skipping previously failed ones
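         	// A send value of zero means no fetchable batch has been found yet.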
   431  	send, skip := uint64(0), []uint64{}
   432  	for send == 0 && !q.headerTaskQueue.Empty() {
   433  		from, _ := q.headerTaskQueue.Pop()
   434  		if q.headerPeerMiss[p.id] != nil {
   435  			if _, ok := q.headerPeerMiss[p.id][from]; ok {
   436  				skip = append(skip, from)
   437  				continue
   438  			}
   439  		}
   440  		send = from
   441  	}
   442  	// Merge all the skipped batches back
   443  	for _, from := range skip {
   444  		q.headerTaskQueue.Push(from, -int64(from))
   445  	}
   446  	// Assemble and return the block download request
   447  	if send == 0 {
   448  		return nil
   449  	}
   450  	request := &fetchRequest{
   451  		Peer: p,
   452  		From: send,
   453  		Time: time.Now(),
   454  	}
   455  	q.headerPendPool[p.id] = request
   456  	return request
   457  }
   458  
   459  // ReserveBodies reserves a set of body fetches for the given peer, skipping any
    460  // previously failed downloads. Besides the next batch of needed fetches, it also
    461  // returns a flag indicating whether empty blocks were queued, requiring processing.
   462  func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
   463  	q.lock.Lock()
   464  	defer q.lock.Unlock()
   465  
   466  	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
   467  }
   468  
   469  // ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
    470  // any previously failed downloads. Besides the next batch of needed fetches, it
    471  // also returns a flag indicating whether empty receipts were queued, requiring importing.
   472  func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
   473  	q.lock.Lock()
   474  	defer q.lock.Unlock()
   475  
   476  	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
   477  }
   478  
   479  // reserveHeaders reserves a set of data download operations for a given peer,
   480  // skipping any previously failed ones. This method is a generic version used
   481  // by the individual special reservation functions.
   482  //
   483  // Note, this method expects the queue lock to be already held for writing. The
    484  // reason the lock is not obtained in here is that the parameters already need
    485  // to access the queue, so they need a lock anyway.
   486  //
   487  // Returns:
   488  //
   489  //	item     - the fetchRequest
   490  //	progress - whether any progress was made
   491  //	throttle - if the caller should throttle for a while
   492  func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque[int64, *types.Header],
   493  	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
   494  	// Short circuit if the pool has been depleted, or if the peer's already
   495  	// downloading something (sanity check not to corrupt state)
   496  	if taskQueue.Empty() {
   497  		return nil, false, true
   498  	}
   499  	if _, ok := pendPool[p.id]; ok {
   500  		return nil, false, false
   501  	}
   502  	// Retrieve a batch of tasks, skipping previously failed ones
   503  	send := make([]*types.Header, 0, count)
   504  	skip := make([]*types.Header, 0)
   505  	progress := false
   506  	throttled := false
   507  	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
    508  		// The task queue pops items in priority order, so the highest-priority
    509  		// item is also the one with the lowest block number.
   510  		header, _ := taskQueue.Peek()
   511  
    512  		// We can ask the result cache whether this header is within the
    513  		// "prioritized" segment of blocks. If it is not, we need to throttle.
   514  
   515  		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync)
   516  		if stale {
   517  			// Don't put back in the task queue, this item has already been
   518  			// delivered upstream
   519  			taskQueue.PopItem()
   520  			progress = true
   521  			delete(taskPool, header.Hash())
   522  			proc = proc - 1
   523  			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
   524  			continue
   525  		}
   526  		if throttle {
    527  			// There are no result slots available. Leave it in the task queue.
    528  			// However, if there are any headers left as 'skipped', we should not
    529  			// tell the caller to throttle, since we still want some other peer
    530  			// to fetch those for us.
   531  			throttled = len(skip) == 0
   532  			break
   533  		}
   534  		if err != nil {
   535  			// this most definitely should _not_ happen
   536  			log.Warn("Failed to reserve headers", "err", err)
    537  			// Leave the header in the task queue and stop reserving for now
   538  			break
   539  		}
   540  		if item.Done(kind) {
   541  			// If it's a noop, we can skip this task
   542  			delete(taskPool, header.Hash())
   543  			taskQueue.PopItem()
   544  			proc = proc - 1
   545  			progress = true
   546  			continue
   547  		}
   548  		// Remove it from the task queue
   549  		taskQueue.PopItem()
    550  		// Otherwise, unless the peer is known not to have the data, add it to the retrieve list
   551  		if p.Lacks(header.Hash()) {
   552  			skip = append(skip, header)
   553  		} else {
   554  			send = append(send, header)
   555  		}
   556  	}
   557  	// Merge all the skipped headers back
   558  	for _, header := range skip {
   559  		taskQueue.Push(header, -int64(header.Number.Uint64()))
   560  	}
   561  	if q.resultCache.HasCompletedItems() {
   562  		// Wake Results, resultCache was modified
   563  		q.active.Signal()
   564  	}
   565  	// Assemble and return the block download request
   566  	if len(send) == 0 {
   567  		return nil, progress, throttled
   568  	}
   569  	request := &fetchRequest{
   570  		Peer:    p,
   571  		Headers: send,
   572  		Time:    time.Now(),
   573  	}
   574  	pendPool[p.id] = request
   575  	return request, progress, throttled
   576  }
   577  
   578  // Revoke cancels all pending requests belonging to a given peer. This method is
   579  // meant to be called during a peer drop to quickly reassign owned data fetches
   580  // to remaining nodes.
   581  func (q *queue) Revoke(peerID string) {
   582  	q.lock.Lock()
   583  	defer q.lock.Unlock()
   584  
   585  	if request, ok := q.headerPendPool[peerID]; ok {
   586  		q.headerTaskQueue.Push(request.From, -int64(request.From))
   587  		delete(q.headerPendPool, peerID)
   588  	}
   589  	if request, ok := q.blockPendPool[peerID]; ok {
   590  		for _, header := range request.Headers {
   591  			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
   592  		}
   593  		delete(q.blockPendPool, peerID)
   594  	}
   595  	if request, ok := q.receiptPendPool[peerID]; ok {
   596  		for _, header := range request.Headers {
   597  			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
   598  		}
   599  		delete(q.receiptPendPool, peerID)
   600  	}
   601  }
   602  
   603  // ExpireHeaders cancels a request that timed out and moves the pending fetch
   604  // task back into the queue for rescheduling.
   605  func (q *queue) ExpireHeaders(peer string) int {
   606  	q.lock.Lock()
   607  	defer q.lock.Unlock()
   608  
   609  	headerTimeoutMeter.Mark(1)
   610  	return q.expire(peer, q.headerPendPool, q.headerTaskQueue)
   611  }
   612  
   613  // ExpireBodies checks for in flight block body requests that exceeded a timeout
   614  // allowance, canceling them and returning the responsible peers for penalisation.
   615  func (q *queue) ExpireBodies(peer string) int {
   616  	q.lock.Lock()
   617  	defer q.lock.Unlock()
   618  
   619  	bodyTimeoutMeter.Mark(1)
   620  	return q.expire(peer, q.blockPendPool, q.blockTaskQueue)
   621  }
   622  
   623  // ExpireReceipts checks for in flight receipt requests that exceeded a timeout
   624  // allowance, canceling them and returning the responsible peers for penalisation.
   625  func (q *queue) ExpireReceipts(peer string) int {
   626  	q.lock.Lock()
   627  	defer q.lock.Unlock()
   628  
   629  	receiptTimeoutMeter.Mark(1)
   630  	return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue)
   631  }
   632  
   633  // expire is the generic check that moves a specific expired task from a pending
   634  // pool back into a task pool. The syntax on the passed taskQueue is a bit weird
    635  // as we would need a generic expire method to handle both queue types, which is
    636  // not supported at the moment (Go 1.19).
   637  //
   638  // Note, this method expects the queue lock to be already held. The reason the
   639  // lock is not obtained in here is that the parameters already need to access
   640  // the queue, so they already need a lock anyway.
   641  func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue interface{}) int {
   642  	// Retrieve the request being expired and log an error if it's non-existent,
   643  	// as there's no order of events that should lead to such expirations.
   644  	req := pendPool[peer]
   645  	if req == nil {
   646  		log.Error("Expired request does not exist", "peer", peer)
   647  		return 0
   648  	}
   649  	delete(pendPool, peer)
   650  
   651  	// Return any non-satisfied requests to the pool
   652  	if req.From > 0 {
   653  		taskQueue.(*prque.Prque[int64, uint64]).Push(req.From, -int64(req.From))
   654  	}
   655  	for _, header := range req.Headers {
   656  		taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64()))
   657  	}
   658  	return len(req.Headers)
   659  }
   660  
   661  // DeliverHeaders injects a header retrieval response into the header results
   662  // cache. This method either accepts all headers it received, or none of them
   663  // if they do not map correctly to the skeleton.
   664  //
   665  // If the headers are accepted, the method makes an attempt to deliver the set
   666  // of ready headers to the processor to keep the pipeline full. However, it will
   667  // not block to prevent stalling other pending deliveries.
   668  func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []common.Hash, headerProcCh chan *headerTask) (int, error) {
   669  	q.lock.Lock()
   670  	defer q.lock.Unlock()
   671  
   672  	var logger log.Logger
   673  	if len(id) < 16 {
   674  		// Tests use short IDs, don't choke on them
   675  		logger = log.New("peer", id)
   676  	} else {
   677  		logger = log.New("peer", id[:16])
   678  	}
   679  	// Short circuit if the data was never requested
   680  	request := q.headerPendPool[id]
   681  	if request == nil {
   682  		headerDropMeter.Mark(int64(len(headers)))
   683  		return 0, errNoFetchesPending
   684  	}
   685  	delete(q.headerPendPool, id)
   686  
   687  	headerReqTimer.UpdateSince(request.Time)
   688  	headerInMeter.Mark(int64(len(headers)))
   689  
   690  	// Ensure headers can be mapped onto the skeleton chain
   691  	target := q.headerTaskPool[request.From].Hash()
   692  
   693  	accepted := len(headers) == MaxHeaderFetch
   694  	if accepted {
   695  		if headers[0].Number.Uint64() != request.From {
   696  			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", hashes[0], "expected", request.From)
   697  			accepted = false
   698  		} else if hashes[len(headers)-1] != target {
    699  			logger.Trace("Last header broke skeleton structure", "number", headers[len(headers)-1].Number, "hash", hashes[len(headers)-1], "expected", target)
   700  			accepted = false
   701  		}
   702  	}
   703  	if accepted {
   704  		parentHash := hashes[0]
   705  		for i, header := range headers[1:] {
   706  			hash := hashes[i+1]
   707  			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
   708  				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
   709  				accepted = false
   710  				break
   711  			}
   712  			if parentHash != header.ParentHash {
   713  				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
   714  				accepted = false
   715  				break
   716  			}
    717  			// Set up parent hash for next round
   718  			parentHash = hash
   719  		}
   720  	}
   721  	// If the batch of headers wasn't accepted, mark as unavailable
   722  	if !accepted {
   723  		logger.Trace("Skeleton filling not accepted", "from", request.From)
   724  		headerDropMeter.Mark(int64(len(headers)))
   725  
   726  		miss := q.headerPeerMiss[id]
   727  		if miss == nil {
   728  			q.headerPeerMiss[id] = make(map[uint64]struct{})
   729  			miss = q.headerPeerMiss[id]
   730  		}
   731  		miss[request.From] = struct{}{}
   732  
   733  		q.headerTaskQueue.Push(request.From, -int64(request.From))
   734  		return 0, errors.New("delivery not accepted")
   735  	}
   736  	// Clean up a successful fetch and try to deliver any sub-results
   737  	copy(q.headerResults[request.From-q.headerOffset:], headers)
   738  	copy(q.headerHashes[request.From-q.headerOffset:], hashes)
   739  
   740  	delete(q.headerTaskPool, request.From)
   741  
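         	// Count how many contiguous batches of MaxHeaderFetch headers are complete,
         	// starting from the already-processed offset.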
   742  	ready := 0
   743  	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
   744  		ready += MaxHeaderFetch
   745  	}
   746  	if ready > 0 {
   747  		// Headers are ready for delivery, gather them and push forward (non blocking)
   748  		processHeaders := make([]*types.Header, ready)
   749  		copy(processHeaders, q.headerResults[q.headerProced:q.headerProced+ready])
   750  
   751  		processHashes := make([]common.Hash, ready)
   752  		copy(processHashes, q.headerHashes[q.headerProced:q.headerProced+ready])
   753  
   754  		select {
   755  		case headerProcCh <- &headerTask{
   756  			headers: processHeaders,
   757  			hashes:  processHashes,
   758  		}:
   759  			logger.Trace("Pre-scheduled new headers", "count", len(processHeaders), "from", processHeaders[0].Number)
   760  			q.headerProced += len(processHeaders)
   761  		default:
   762  		}
   763  	}
   764  	// Check for termination and return
   765  	if len(q.headerTaskPool) == 0 {
   766  		q.headerContCh <- false
   767  	}
   768  	return len(headers), nil
   769  }
   770  
   771  // DeliverBodies injects a block body retrieval response into the results queue.
    772  // The method returns the number of block bodies accepted from the delivery and
   773  // also wakes any threads waiting for data delivery.
   774  func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
   775  	uncleLists [][]*types.Header, uncleListHashes []common.Hash,
   776  	withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash) (int, error) {
   777  	q.lock.Lock()
   778  	defer q.lock.Unlock()
   779  
   780  	validate := func(index int, header *types.Header) error {
   781  		if txListHashes[index] != header.TxHash {
   782  			return errInvalidBody
   783  		}
   784  		if uncleListHashes[index] != header.UncleHash {
   785  			return errInvalidBody
   786  		}
   787  		if header.WithdrawalsHash == nil {
    788  			// nil hash means that withdrawals should not be present in the body
   789  			if withdrawalLists[index] != nil {
   790  				return errInvalidBody
   791  			}
   792  		} else { // non-nil hash: body must have withdrawals
   793  			if withdrawalLists[index] == nil {
   794  				return errInvalidBody
   795  			}
   796  			if withdrawalListHashes[index] != *header.WithdrawalsHash {
   797  				return errInvalidBody
   798  			}
   799  		}
   800  		// Blocks must have a number of blobs corresponding to the header gas usage,
   801  		// and zero before the Cancun hardfork.
   802  		var blobs int
   803  		for _, tx := range txLists[index] {
   804  			// Count the number of blobs to validate against the header's blobGasUsed
   805  			blobs += len(tx.BlobHashes())
   806  
   807  			// Validate the data blobs individually too
   808  			if tx.Type() == types.BlobTxType {
   809  				if len(tx.BlobHashes()) == 0 {
   810  					return errInvalidBody
   811  				}
   812  				for _, hash := range tx.BlobHashes() {
   813  					if hash[0] != params.BlobTxHashVersion {
   814  						return errInvalidBody
   815  					}
   816  				}
   817  				if tx.BlobTxSidecar() != nil {
   818  					return errInvalidBody
   819  				}
   820  			}
   821  		}
   822  		if header.BlobGasUsed != nil {
   823  			if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
   824  				return errInvalidBody
   825  			}
   826  		} else {
   827  			if blobs != 0 {
   828  				return errInvalidBody
   829  			}
   830  		}
   831  		return nil
   832  	}
   833  
   834  	reconstruct := func(index int, result *fetchResult) {
   835  		result.Transactions = txLists[index]
   836  		result.Uncles = uncleLists[index]
   837  		result.Withdrawals = withdrawalLists[index]
   838  		result.SetBodyDone()
   839  	}
   840  	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
   841  		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
   842  }
   843  
   844  // DeliverReceipts injects a receipt retrieval response into the results queue.
   845  // The method returns the number of transaction receipts accepted from the delivery
   846  // and also wakes any threads waiting for data delivery.
   847  func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, receiptListHashes []common.Hash) (int, error) {
   848  	q.lock.Lock()
   849  	defer q.lock.Unlock()
   850  
   851  	validate := func(index int, header *types.Header) error {
   852  		if receiptListHashes[index] != header.ReceiptHash {
   853  			return errInvalidReceipt
   854  		}
   855  		return nil
   856  	}
   857  	reconstruct := func(index int, result *fetchResult) {
   858  		result.Receipts = receiptList[index]
   859  		result.SetReceiptsDone()
   860  	}
   861  	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
   862  		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
   863  }
   864  
   865  // deliver injects a data retrieval response into the results queue.
   866  //
   867  // Note, this method expects the queue lock to be already held for writing. The
    868  // reason this lock is not obtained in here is that the parameters already need
    869  // to access the queue, so they need a lock anyway.
   870  func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
   871  	taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest,
   872  	reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter,
   873  	results int, validate func(index int, header *types.Header) error,
   874  	reconstruct func(index int, result *fetchResult)) (int, error) {
   875  	// Short circuit if the data was never requested
   876  	request := pendPool[id]
   877  	if request == nil {
   878  		resDropMeter.Mark(int64(results))
   879  		return 0, errNoFetchesPending
   880  	}
   881  	delete(pendPool, id)
   882  
   883  	reqTimer.UpdateSince(request.Time)
   884  	resInMeter.Mark(int64(results))
   885  
   886  	// If no data items were retrieved, mark them as unavailable for the origin peer
   887  	if results == 0 {
   888  		for _, header := range request.Headers {
   889  			request.Peer.MarkLacking(header.Hash())
   890  		}
   891  	}
   892  	// Assemble each of the results with their headers and retrieved data parts
   893  	var (
   894  		accepted int
   895  		failure  error
   896  		i        int
   897  		hashes   []common.Hash
   898  	)
   899  	for _, header := range request.Headers {
   900  		// Short circuit assembly if no more fetch results are found
   901  		if i >= results {
   902  			break
   903  		}
   904  		// Validate the fields
   905  		if err := validate(i, header); err != nil {
   906  			failure = err
   907  			break
   908  		}
   909  		hashes = append(hashes, header.Hash())
   910  		i++
   911  	}
   912  
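         	// Reconstruct the results that passed validation; anything past the
         	// validated prefix is pushed back into the task queue below.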
   913  	for _, header := range request.Headers[:i] {
   914  		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil && !stale {
   915  			reconstruct(accepted, res)
   916  		} else {
    917  			// else: between here and above, some other peer may have filled this
    918  			// result, or it was indeed a no-op. This should not happen, but if it
    919  			// does it's not something to panic about.
   920  			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
   921  			failure = errStaleDelivery
   922  		}
   923  		// Clean up a successful fetch
   924  		delete(taskPool, hashes[accepted])
   925  		accepted++
   926  	}
   927  	resDropMeter.Mark(int64(results - accepted))
   928  
   929  	// Return all failed or missing fetches to the queue
   930  	for _, header := range request.Headers[accepted:] {
   931  		taskQueue.Push(header, -int64(header.Number.Uint64()))
   932  	}
   933  	// Wake up Results
   934  	if accepted > 0 {
   935  		q.active.Signal()
   936  	}
   937  	if failure == nil {
   938  		return accepted, nil
   939  	}
   940  	// If none of the data was good, it's a stale delivery
   941  	if accepted > 0 {
   942  		return accepted, fmt.Errorf("partial failure: %v", failure)
   943  	}
   944  	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
   945  }
   946  
   947  // Prepare configures the result cache to allow accepting and caching inbound
   948  // fetch results.
   949  func (q *queue) Prepare(offset uint64, mode SyncMode) {
   950  	q.lock.Lock()
   951  	defer q.lock.Unlock()
   952  
   953  	// Prepare the queue for sync results
   954  	q.resultCache.Prepare(offset)
   955  	q.mode = mode
   956  }