github.com/dominant-strategies/go-quai@v0.28.2/eth/downloader/queue.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Contains the block download scheduler to collect download tasks and schedule
     18  // them in an ordered and throttled way.
    19  
    20  package downloader
    21  
    22  import (
    23  	"errors"
    24  	"fmt"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/dominant-strategies/go-quai/common"
    30  	"github.com/dominant-strategies/go-quai/common/prque"
    31  	"github.com/dominant-strategies/go-quai/core/types"
    32  	"github.com/dominant-strategies/go-quai/log"
    33  	"github.com/dominant-strategies/go-quai/metrics"
    34  	"github.com/dominant-strategies/go-quai/trie"
    35  )
    36  
    37  const (
    38  	bodyType    = uint(0)
    39  	receiptType = uint(1)
    40  )
    41  
    42  var (
    43  	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
    44  	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
    45  	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
    46  	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
    47  )
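
// Taken together, these knobs drive the adaptive throttling in Results: the
// queue starts out admitting blockCacheInitialItems results, maintains an
// exponential moving average of observed block sizes weighted by
// blockCacheSizeWeight, and re-derives the number of permitted in-flight
// results as roughly blockCacheMemory divided by that average, with
// blockCacheMaxItems as the hard ceiling typically handed to the result store.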
    48  
    49  var (
    50  	errNoFetchesPending = errors.New("no fetches pending")
    51  	errStaleDelivery    = errors.New("stale delivery")
    52  )
    53  
    54  // fetchRequest is a currently running data retrieval operation.
    55  type fetchRequest struct {
    56  	Peer    *peerConnection // Peer to which the request was sent
    57  	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
    58  	To      uint64          // Expected stopping number for the request (used for skeleton fills only)
    59  	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
    60  	Time    time.Time       // Time when the request was made
    61  }
    62  
    63  // fetchResult is a struct collecting partial results from data fetchers until
    64  // all outstanding pieces complete and the result as a whole can be processed.
    65  type fetchResult struct {
    66  	pending int32 // Flag telling what deliveries are outstanding
    67  
    68  	Header          *types.Header
    69  	Uncles          []*types.Header
    70  	Transactions    types.Transactions
    71  	ExtTransactions types.Transactions
    72  	SubManifest     types.BlockManifest
    73  	Receipts        types.Receipts
    74  }
    75  
    76  func newFetchResult(header *types.Header) *fetchResult {
    77  	item := &fetchResult{
    78  		Header: header,
    79  	}
    80  	if !header.EmptyBody() {
    81  		item.pending |= (1 << bodyType)
    82  	}
    83  	return item
    84  }
    85  
    86  // SetBodyDone flags the body as finished.
    87  func (f *fetchResult) SetBodyDone() {
    88  	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
    89  		atomic.AddInt32(&f.pending, -1)
    90  	}
    91  }
    92  
    93  // AllDone checks if item is done.
    94  func (f *fetchResult) AllDone() bool {
    95  	return atomic.LoadInt32(&f.pending) == 0
    96  }
    97  
    98  // Done checks if the given type is done already
    99  func (f *fetchResult) Done(kind uint) bool {
   100  	v := atomic.LoadInt32(&f.pending)
   101  	return v&(1<<kind) == 0
   102  }
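
// The pending field acts as a small bit mask indexed by the *Type constants:
// a result for a non-empty body starts with bit bodyType (1 << 0) set,
// SetBodyDone clears it by subtracting 1 << bodyType, and Done/AllDone just
// test the remaining bits. Note that receiptType is declared but never set
// anywhere in this file, so receipt tracking is effectively dormant here.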
   103  
    104  // queue represents hashes that either need fetching or are being fetched
   105  type queue struct {
   106  	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
   107  
    108  	// Headers are "special": they download in batches, supported by a skeleton chain
   109  	headerHead      common.Hash              // Hash of the last queued header to verify order
   110  	headerTaskPool  map[uint64]*types.Header // Pending header retrieval tasks, mapping starting indexes to skeleton headers
    111  	headerToPool    map[uint64]uint64              // Pending header retrieval targets, mapping skeleton indexes to the lower bound of each fill batch
   112  	headerTaskQueue *prque.Prque                   // Priority queue of the skeleton indexes to fetch the filling headers for
   113  	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
   114  	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
   115  	headerResults   []*types.Header                // Result cache accumulating the completed headers
   116  	headerProced    int                            // Number of headers already processed from the results
   117  	headerOffset    uint64                         // Number of the first header in the result cache
   118  	headerContCh    chan bool                      // Channel to notify when header download finishes
   119  
    120  	// All data retrievals below are based on an already assembled header chain
   121  	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
   122  	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
   123  	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations
   124  
   125  	resultCache *resultStore       // Downloaded but not yet delivered fetch results
   126  	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)
   127  
   128  	lock   *sync.RWMutex
   129  	active *sync.Cond
   130  	closed bool
   131  
   132  	lastStatLog time.Time
   133  }
   134  
   135  // newQueue creates a new download queue for scheduling block retrieval.
   136  func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
   137  	lock := new(sync.RWMutex)
   138  	q := &queue{
   139  		headerContCh:   make(chan bool),
   140  		blockTaskQueue: prque.New(nil),
   141  		active:         sync.NewCond(lock),
   142  		lock:           lock,
   143  	}
   144  	q.Reset(blockCacheLimit, thresholdInitialSize)
   145  	return q
   146  }
   147  
   148  // Reset clears out the queue contents.
   149  func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
   150  	q.lock.Lock()
   151  	defer q.lock.Unlock()
   152  
   153  	q.closed = false
   154  	q.mode = FullSync
   155  
   156  	q.headerHead = common.Hash{}
   157  	q.headerPendPool = make(map[string]*fetchRequest)
   158  
   159  	q.blockTaskPool = make(map[common.Hash]*types.Header)
   160  	q.blockTaskQueue.Reset()
   161  	q.blockPendPool = make(map[string]*fetchRequest)
   162  
   163  	q.resultCache = newResultStore(blockCacheLimit)
   164  	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
   165  }
   166  
   167  // Close marks the end of the sync, unblocking Results.
   168  // It may be called even if the queue is already closed.
   169  func (q *queue) Close() {
   170  	q.lock.Lock()
   171  	q.closed = true
   172  	q.active.Signal()
   173  	q.lock.Unlock()
   174  }
   175  
   176  // PendingHeaders retrieves the number of header requests pending for retrieval.
   177  func (q *queue) PendingHeaders() int {
   178  	q.lock.Lock()
   179  	defer q.lock.Unlock()
   180  
   181  	return q.headerTaskQueue.Size()
   182  }
   183  
   184  // PendingBlocks retrieves the number of block (body) requests pending for retrieval.
   185  func (q *queue) PendingBlocks() int {
   186  	q.lock.Lock()
   187  	defer q.lock.Unlock()
   188  
   189  	return q.blockTaskQueue.Size()
   190  }
   191  
   192  // InFlightHeaders retrieves whether there are header fetch requests currently
   193  // in flight.
   194  func (q *queue) InFlightHeaders() bool {
   195  	q.lock.Lock()
   196  	defer q.lock.Unlock()
   197  
   198  	return len(q.headerPendPool) > 0
   199  }
   200  
   201  // InFlightBlocks retrieves whether there are block fetch requests currently in
   202  // flight.
   203  func (q *queue) InFlightBlocks() bool {
   204  	q.lock.Lock()
   205  	defer q.lock.Unlock()
   206  
   207  	return len(q.blockPendPool) > 0
   208  }
   209  
   210  // Idle returns if the queue is fully idle or has some data still inside.
   211  func (q *queue) Idle() bool {
   212  	q.lock.Lock()
   213  	defer q.lock.Unlock()
   214  
   215  	queued := q.blockTaskQueue.Size()
   216  	pending := len(q.blockPendPool)
   217  
   218  	return (queued + pending) == 0
   219  }
   220  
   221  // ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
   222  // up an already retrieved header skeleton.
   223  func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
   224  	q.lock.Lock()
   225  	defer q.lock.Unlock()
   226  
   227  	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
   228  	if q.headerResults != nil {
   229  		panic("skeleton assembly already in progress")
   230  	}
   231  
   232  	// Schedule all the header retrieval tasks for the skeleton assembly
   233  	q.headerTaskPool = make(map[uint64]*types.Header)
   234  	q.headerToPool = make(map[uint64]uint64)
   235  	q.headerTaskQueue = prque.New(nil)
   236  	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
   237  	q.headerResults = make([]*types.Header, skeleton[0].NumberU64()-skeleton[len(skeleton)-1].NumberU64())
   238  	q.headerProced = 0
   239  	q.headerOffset = skeleton[len(skeleton)-1].NumberU64() - 1
   240  	q.headerContCh = make(chan bool, 1)
   241  
   242  	for i, header := range skeleton {
   243  		if i < len(skeleton)-1 {
   244  			index := skeleton[i].NumberU64()
   245  			q.headerTaskPool[index] = header
   246  			q.headerToPool[index] = skeleton[i+1].NumberU64()
   247  			q.headerTaskQueue.Push(index, -int64(index))
   248  		}
   249  	}
   250  }
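
// To illustrate with hypothetical numbers: for a descending skeleton of
// [192, 128, 64], headerResults is sized 192-64 = 128 and headerOffset is 63.
// Two fill tasks get queued: index 192 with headerToPool[192] = 128, and
// index 128 with headerToPool[128] = 64. The final skeleton entry is never a
// task itself; it only serves as the lower bound of the last batch.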
   251  
    252  // RetrieveHeaders retrieves the header chain assembled based on the scheduled
   253  // skeleton.
   254  func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
   255  	q.lock.Lock()
   256  	defer q.lock.Unlock()
   257  
   258  	// since we go backwards, the last header in the skeleton is not useful
   259  	headers, proced := q.headerResults, q.headerProced
   260  	q.headerResults, q.headerProced = nil, 0
   261  
   262  	return headers, proced
   263  }
   264  
   265  // Schedule adds a set of headers for the download queue for scheduling, returning
   266  // the new headers encountered.
   267  func (q *queue) Schedule(headers []*types.Header) []*types.Header {
   268  	q.lock.Lock()
   269  	defer q.lock.Unlock()
   270  
   271  	// Insert all the headers prioritised by the contained block number
   272  	inserts := make([]*types.Header, 0, len(headers))
   273  	for _, header := range headers {
   274  		// Make sure chain order is honoured and preserved throughout
    275  		if header == nil {
    276  			break
    277  		}
    278  		hash := header.Hash()
    279  		if header.Number() == nil {
    280  			log.Warn("Header broke chain ordering", "hash", hash, "reason", "nil number")
    281  			break
    282  		}
   283  		// Make sure no duplicate requests are executed
   284  		// We cannot skip this, even if the block is empty, since this is
   285  		// what triggers the fetchResult creation.
   286  		if _, ok := q.blockTaskPool[hash]; ok {
   287  			log.Warn("Header already scheduled for block fetch", "number", header.Number(), "hash", hash)
   288  		} else {
   289  			q.blockTaskPool[hash] = header
   290  			q.blockTaskQueue.Push(header, -int64(header.Number().Uint64()))
   291  		}
   292  		inserts = append(inserts, header)
   293  		q.headerHead = hash
   294  	}
   295  	return inserts
   296  }
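
// A note on the -int64(...) priorities used above and throughout this file:
// prque pops the highest priority first, so pushing with the negated block
// number turns it into a min-queue over block numbers, keeping fetches in
// roughly ascending chain order.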
   297  
    298  // Results retrieves and permanently removes a batch of fetch results from
    299  // the cache. The result slice will be empty if the queue has been closed.
    300  // Results can be called concurrently with Deliver and Schedule, but assumes
    301  // that there are not two simultaneous callers to Results.
   302  func (q *queue) Results(block bool) []*fetchResult {
   303  	// Abort early if there are no items and non-blocking requested
   304  	if !block && !q.resultCache.HasCompletedItems() {
   305  		return nil
   306  	}
   307  	closed := false
   308  	for !closed && !q.resultCache.HasCompletedItems() {
   309  		// In order to wait on 'active', we need to obtain the lock.
   310  		// That may take a while, if someone is delivering at the same
   311  		// time, so after obtaining the lock, we check again if there
   312  		// are any results to fetch.
    313  		// Also, between asking for the lock and actually obtaining it,
    314  		// someone may have closed the queue. In that case, we should
    315  		// return the available results and stop blocking.
   316  		q.lock.Lock()
   317  		if q.resultCache.HasCompletedItems() || q.closed {
   318  			q.lock.Unlock()
   319  			break
   320  		}
   321  		// No items available, and not closed
   322  		q.active.Wait()
   323  		closed = q.closed
   324  		q.lock.Unlock()
   325  	}
    326  	// Regardless of whether the queue is closed, we can still deliver whatever we have.
   327  	// We should only take one block out of the resultcache at a time.
   328  	// Append of the current block will trigger the pop of the next block.
   329  	results := q.resultCache.GetCompleted(maxHeadersProcess)
   330  	for _, result := range results {
   331  		// Recalculate the result item weights to prevent memory exhaustion
   332  		size := result.Header.Size()
   333  		for _, uncle := range result.Uncles {
   334  			size += uncle.Size()
   335  		}
   336  		for _, tx := range result.Transactions {
   337  			size += tx.Size()
   338  		}
    339  		for _, etx := range result.ExtTransactions {
    340  			size += etx.Size()
   341  		}
   342  		size += result.SubManifest.Size()
   343  		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
   344  			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
   345  	}
   346  	// Using the newly calibrated resultsize, figure out the new throttle limit
   347  	// on the result cache
   348  	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
   349  	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)
   350  
   351  	// Log some info at certain times
   352  	if time.Since(q.lastStatLog) > 60*time.Second {
   353  		q.lastStatLog = time.Now()
   354  		info := q.Stats()
   355  		info = append(info, "throttle", throttleThreshold)
   356  		log.Info("Downloader queue stats", info...)
   357  	}
   358  	return results
   359  }
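
// As a worked example of the calibration above: with the default 256 MiB of
// blockCacheMemory and an EMA resultSize that has settled at, say, 64 KiB,
// the ceiling division yields 4096 throttle slots. Heavier blocks shrink the
// window and lighter ones widen it, and SetThrottleThreshold may clamp the
// value further to the result store's own limits.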
   360  
   361  func (q *queue) Stats() []interface{} {
   362  	q.lock.RLock()
   363  	defer q.lock.RUnlock()
   364  
   365  	return q.stats()
   366  }
   367  
   368  func (q *queue) stats() []interface{} {
   369  	return []interface{}{
   370  		"blockTasks", q.blockTaskQueue.Size(),
   371  		"itemSize", q.resultSize,
   372  	}
   373  }
   374  
   375  // ReserveHeaders reserves a set of headers for the given peer, skipping any
   376  // previously failed batches.
   377  func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
   378  	q.lock.Lock()
   379  	defer q.lock.Unlock()
   380  
   381  	// Short circuit if the peer's already downloading something (sanity check to
   382  	// not corrupt state)
   383  	if _, ok := q.headerPendPool[p.id]; ok {
   384  		return nil
   385  	}
   386  	// Retrieve a batch of hashes, skipping previously failed ones
   387  	send, skip := uint64(0), []uint64{}
   388  	for send == 0 && !q.headerTaskQueue.Empty() {
   389  		from, _ := q.headerTaskQueue.Pop()
   390  		if q.headerPeerMiss[p.id] != nil {
   391  			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
   392  				skip = append(skip, from.(uint64))
   393  				continue
   394  			}
   395  		}
   396  		send = from.(uint64)
   397  	}
   398  	// Merge all the skipped batches back
   399  	for _, from := range skip {
   400  		q.headerTaskQueue.Push(from, -int64(from))
   401  	}
   402  	// Assemble and return the block download request
   403  	if send == 0 {
   404  		return nil
   405  	}
   406  	request := &fetchRequest{
   407  		Peer: p,
   408  		From: send - 1,
   409  		Time: time.Now(),
   410  	}
   411  	q.headerPendPool[p.id] = request
   412  	return request
   413  }
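
// Note that the returned request's From is one below the skeleton index that
// was popped (send - 1). DeliverHeaders undoes this by keying its task pool
// and target lookups on request.From+1, so both sides address the same batch.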
   414  
   415  // ReserveBodies reserves a set of body fetches for the given peer, skipping any
    416  // previously failed downloads. Besides the next batch of needed fetches, it also
   417  // returns a flag whether empty blocks were queued requiring processing.
   418  func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
   419  	q.lock.Lock()
   420  	defer q.lock.Unlock()
   421  
   422  	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
   423  }
   424  
   425  // reserveHeaders reserves a set of data download operations for a given peer,
   426  // skipping any previously failed ones. This method is a generic version used
   427  // by the individual special reservation functions.
   428  //
   429  // Note, this method expects the queue lock to be already held for writing. The
   430  // reason the lock is not obtained in here is because the parameters already need
   431  // to access the queue, so they already need a lock anyway.
   432  //
   433  // Returns:
   434  //
   435  //	item     - the fetchRequest
   436  //	progress - whether any progress was made
   437  //	throttle - if the caller should throttle for a while
   438  func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
   439  	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
   440  	// Short circuit if the pool has been depleted, or if the peer's already
   441  	// downloading something (sanity check not to corrupt state)
   442  	if taskQueue.Empty() {
   443  		return nil, false, true
   444  	}
   445  	if _, ok := pendPool[p.id]; ok {
   446  		return nil, false, false
   447  	}
   448  	// Retrieve a batch of tasks, skipping previously failed ones
   449  	send := make([]*types.Header, 0, count)
   450  	skip := make([]*types.Header, 0)
   451  	progress := false
   452  	throttled := false
   453  
   454  	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
    455  		// The task queue will pop items in order, so the highest priority
    456  		// block is also the lowest block number.
   457  		h, _ := taskQueue.Peek()
   458  		header := h.(*types.Header)
    459  		// We can ask the result cache if this header is within the
    460  		// "prioritized" segment of blocks. If it is not, we need to throttle.
   461  
   462  		stale, throttle, item, err := q.resultCache.AddFetch(header)
   463  		if stale {
   464  			// Don't put back in the task queue, this item has already been
   465  			// delivered upstream
   466  			taskQueue.PopItem()
   467  			progress = true
   468  			delete(taskPool, header.Hash())
    469  			proc--
   470  			log.Trace("Fetch reservation already delivered", "number", header.Number().Uint64())
   471  			continue
   472  		}
   473  		if throttle {
    474  			// There are no result slots available. Leave it in the task queue.
    475  			// However, if there are any left as 'skipped', we should not tell
   476  			// the caller to throttle, since we still want some other
   477  			// peer to fetch those for us
   478  			throttled = len(skip) == 0
   479  			break
   480  		}
   481  		if err != nil {
   482  			// this most definitely should _not_ happen
   483  			log.Warn("Failed to reserve headers", "err", err)
   484  			// There are no resultslots available. Leave it in the task queue
   485  			break
   486  		}
   487  		if item.Done(kind) {
   488  			// If it's a noop, we can skip this task
   489  			delete(taskPool, header.Hash())
   490  			taskQueue.PopItem()
    491  			proc--
   492  			progress = true
   493  			continue
   494  		}
   495  		// Remove it from the task queue
   496  		taskQueue.PopItem()
   497  		// Otherwise unless the peer is known not to have the data, add to the retrieve list
   498  		if p.Lacks(header.Hash()) {
   499  			skip = append(skip, header)
   500  		} else {
   501  			send = append(send, header)
   502  		}
   503  	}
   504  	// Merge all the skipped headers back
   505  	for _, header := range skip {
   506  		taskQueue.Push(header, -int64(header.Number().Uint64()))
   507  	}
   508  	if q.resultCache.HasCompletedItems() {
   509  		// Wake Results, resultCache was modified
   510  		q.active.Signal()
   511  	}
   512  	// Assemble and return the block download request
   513  	if len(send) == 0 {
   514  		return nil, progress, throttled
   515  	}
   516  	request := &fetchRequest{
   517  		Peer:    p,
   518  		Headers: send,
   519  		Time:    time.Now(),
   520  	}
   521  	pendPool[p.id] = request
   522  	return request, progress, throttled
   523  }
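
// A minimal sketch of how a fetcher loop might drive the reservation API
// above (hypothetical names and back-off; the real loop lives in the
// downloader's fetch machinery):
//
//	if request, progress, throttle := q.ReserveBodies(peer, count); request != nil {
//		if err := peer.FetchBodies(request); err != nil {
//			q.CancelBodies(request) // return the headers to the task queue
//		}
//	} else if throttle && !progress {
//		time.Sleep(100 * time.Millisecond) // assumed back-off while result slots drain
//	}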
   524  
   525  // CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
   526  func (q *queue) CancelHeaders(request *fetchRequest) {
   527  	q.lock.Lock()
   528  	defer q.lock.Unlock()
   529  	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
   530  }
   531  
   532  // CancelBodies aborts a body fetch request, returning all pending headers to the
   533  // task queue.
   534  func (q *queue) CancelBodies(request *fetchRequest) {
   535  	q.lock.Lock()
   536  	defer q.lock.Unlock()
   537  	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
   538  }
   539  
   540  // Cancel aborts a fetch request, returning all pending hashes to the task queue.
   541  func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
   542  	if request.From > 0 {
   543  		taskQueue.Push(request.From, -int64(request.From))
   544  	}
   545  	for _, header := range request.Headers {
   546  		taskQueue.Push(header, -int64(header.Number().Uint64()))
   547  	}
   548  	delete(pendPool, request.Peer.id)
   549  }
   550  
   551  // Revoke cancels all pending requests belonging to a given peer. This method is
   552  // meant to be called during a peer drop to quickly reassign owned data fetches
   553  // to remaining nodes.
   554  func (q *queue) Revoke(peerID string) {
   555  	q.lock.Lock()
   556  	defer q.lock.Unlock()
   557  
   558  	if request, ok := q.blockPendPool[peerID]; ok {
   559  		for _, header := range request.Headers {
   560  			q.blockTaskQueue.Push(header, -int64(header.Number().Uint64()))
   561  		}
   562  		delete(q.blockPendPool, peerID)
   563  	}
   564  }
   565  
   566  // ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
   567  // canceling them and returning the responsible peers for penalisation.
   568  func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
   569  	q.lock.Lock()
   570  	defer q.lock.Unlock()
   571  
   572  	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
   573  }
   574  
   575  // ExpireBodies checks for in flight block body requests that exceeded a timeout
   576  // allowance, canceling them and returning the responsible peers for penalisation.
   577  func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
   578  	q.lock.Lock()
   579  	defer q.lock.Unlock()
   580  
   581  	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
   582  }
   583  
    584  // expire is the generic check that moves expired tasks from a pending pool back
   585  // into a task pool, returning all entities caught with expired tasks.
   586  //
   587  // Note, this method expects the queue lock to be already held. The
   588  // reason the lock is not obtained in here is because the parameters already need
   589  // to access the queue, so they already need a lock anyway.
   590  func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
   591  	// Iterate over the expired requests and return each to the queue
   592  	expiries := make(map[string]int)
   593  	for id, request := range pendPool {
   594  		if time.Since(request.Time) > timeout {
   595  			// Update the metrics with the timeout
   596  			timeoutMeter.Mark(1)
   597  
    598  			// Return any non-satisfied requests to the pool
   599  			if request.From > 0 {
   600  				taskQueue.Push(request.From, -int64(request.From))
   601  			}
   602  			for _, header := range request.Headers {
   603  				taskQueue.Push(header, -int64(header.Number().Uint64()))
   604  			}
    605  			// Add the peer to the expiry report along with the number of failed requests
   606  			expiries[id] = len(request.Headers)
   607  
   608  			// Remove the expired requests from the pending pool directly
   609  			delete(pendPool, id)
   610  		}
   611  	}
   612  	return expiries
   613  }
   614  
   615  // DeliverHeaders injects a header retrieval response into the header results
   616  // cache. This method either accepts all headers it received, or none of them
   617  // if they do not map correctly to the skeleton.
   618  //
   619  // If the headers are accepted, the method makes an attempt to deliver the set
   620  // of ready headers to the processor to keep the pipeline full. However it will
   621  // not block to prevent stalling other pending deliveries.
   622  func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
   623  	q.lock.Lock()
   624  	defer q.lock.Unlock()
   625  
    626  	// Tests use short peer IDs and live peers full-length ones; both are
    627  	// routed through the global logger, so no per-peer logger is derived.
    628  	logger := log.Log
   633  	// Short circuit if the data was never requested
   634  	request := q.headerPendPool[id]
   635  	if request == nil {
   636  		return 0, errNoFetchesPending
   637  	}
   638  	headerReqTimer.UpdateSince(request.Time)
   639  	delete(q.headerPendPool, id)
   640  
   641  	// Ensure headers can be mapped onto the skeleton chain
   642  	targetTo := q.headerToPool[request.From+1]
   643  
   644  	var accepted bool
   645  	requiredHeaderFetch := request.From - targetTo
   646  	if targetTo != 0 || common.NodeLocation.Context() == common.PRIME_CTX {
   647  		requiredHeaderFetch += 1
   648  	}
   649  	accepted = len(headers) == int(requiredHeaderFetch)
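
	// Worked example with hypothetical numbers: for a skeleton task at index
	// 192 whose next-lower entry is 128, request.From is 191 and targetTo is
	// 128, so requiredHeaderFetch is 191-128+1 = 64 headers covering the
	// range [128, 191]. When targetTo is 0 outside the prime context, the +1
	// is skipped, presumably because genesis itself is never requested.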
   650  
    651  	// Reverse the array so the headers run in ascending order
   652  	for i, j := 0, len(headers)-1; i < j; i, j = i+1, j-1 {
   653  		headers[i], headers[j] = headers[j], headers[i]
   654  	}
   655  
   656  	if len(headers) > 0 && accepted {
   657  		if headers[len(headers)-1].Number().Uint64() != request.From {
   658  			logger.Info("First header broke chain ordering", "number", headers[0].Number(), "hash", headers[0].Hash(), "expected", request.From)
   659  			accepted = false
   660  		} else if headers[0].NumberU64() != targetTo {
   661  			if targetTo != 0 {
   662  				logger.Info("Last header broke skeleton structure ", "number", headers[0].Number(), "expected", targetTo)
   663  				accepted = false
   664  			}
   665  		}
   666  
   667  		if accepted {
   668  			parentHash := headers[0].Hash()
   669  			for _, header := range headers[1:] {
   670  				hash := header.Hash()
   671  				if parentHash != header.ParentHash() {
   672  					logger.Warn("Header broke chain ancestry", "number", header.Number(), "hash", hash)
   673  					accepted = false
   674  					break
   675  				}
   676  				// Set-up parent hash for next round
   677  				parentHash = hash
   678  			}
   679  		}
   680  	}
   681  	// If the batch of headers wasn't accepted, mark as unavailable
   682  	if !accepted {
   683  		logger.Trace("Skeleton filling not accepted", "from", request.From)
   684  
   685  		miss := q.headerPeerMiss[id]
   686  		if miss == nil {
   687  			q.headerPeerMiss[id] = make(map[uint64]struct{})
   688  			miss = q.headerPeerMiss[id]
   689  		}
   690  		miss[request.From+1] = struct{}{}
   691  
   692  		q.headerTaskQueue.Push(request.From+1, -int64(request.From+1))
   693  		return 0, errors.New("delivery not accepted")
   694  	}
   695  
   696  	if len(headers) > 0 {
   697  		copy(q.headerResults[targetTo-q.headerOffset:], headers)
   698  	}
   699  
   700  	// Clean up a successful fetch and try to deliver any sub-results
   701  	delete(q.headerTaskPool, request.From+1)
   702  	delete(q.headerToPool, request.From+1)
   703  
   704  	ready := int(requiredHeaderFetch)
   705  
   706  	if ready > 0 {
   707  		// Headers are ready for delivery, gather them and push forward (non blocking)
   708  		process := make([]*types.Header, ready)
   709  		copy(process, headers)
   710  
   711  		select {
   712  		case headerProcCh <- process:
   713  			logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number())
   714  			q.headerProced += len(process)
   715  		default:
   716  		}
   717  	}
   718  	// Check for termination and return
   719  	if len(q.headerTaskPool) == 0 {
   720  		q.headerContCh <- false
   721  	}
   722  	return len(headers), nil
   723  }
   724  
   725  // DeliverBodies injects a block body retrieval response into the results queue.
    726  // The method returns the number of block bodies accepted from the delivery and
   727  // also wakes any threads waiting for data delivery.
   728  func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header, etxLists [][]*types.Transaction, manifests []types.BlockManifest) (int, error) {
   729  	q.lock.Lock()
   730  	defer q.lock.Unlock()
   731  	nodeCtx := common.NodeLocation.Context()
   732  	trieHasher := trie.NewStackTrie(nil)
   733  	validate := func(index int, header *types.Header) error {
   734  		if nodeCtx != common.ZONE_CTX {
    735  			if len(txLists[index]) != 0 || len(etxLists[index]) != 0 || len(uncleLists[index]) != 0 {
   736  				return errInvalidBody
   737  			}
   738  			if types.DeriveSha(manifests[index], trieHasher) != header.ManifestHash(nodeCtx+1) {
   739  				return errInvalidBody
   740  			}
   741  		} else {
   742  			if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash() {
   743  				return errInvalidBody
   744  			}
   745  			if types.DeriveSha(types.Transactions(etxLists[index]), trieHasher) != header.EtxHash() {
   746  				return errInvalidBody
   747  			}
   748  			if types.CalcUncleHash(uncleLists[index]) != header.UncleHash() {
   749  				return errInvalidBody
   750  			}
   751  		}
   752  		return nil
   753  	}
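
	// In short: prime and region nodes carry no transactions, external
	// transactions or uncles, so any non-empty list is an immediate
	// errInvalidBody and only the subordinate manifest is checked against
	// the header's commitment. Zone nodes do the inverse, verifying the
	// three body roots (tx trie, etx trie, uncle hash) against the header.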
   754  
   755  	reconstruct := func(index int, result *fetchResult) {
   756  		result.Transactions = txLists[index]
   757  		result.Uncles = uncleLists[index]
   758  		result.ExtTransactions = etxLists[index]
   759  		result.SubManifest = manifests[index]
   760  		result.SetBodyDone()
   761  	}
   762  	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
   763  		bodyReqTimer, len(txLists), validate, reconstruct)
   764  }
   765  
   766  // deliver injects a data retrieval response into the results queue.
   767  //
   768  // Note, this method expects the queue lock to be already held for writing. The
   769  // reason this lock is not obtained in here is because the parameters already need
   770  // to access the queue, so they already need a lock anyway.
   771  func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
   772  	taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
   773  	results int, validate func(index int, header *types.Header) error,
   774  	reconstruct func(index int, result *fetchResult)) (int, error) {
   775  
   776  	// Short circuit if the data was never requested
   777  	request := pendPool[id]
   778  	if request == nil {
   779  		return 0, errNoFetchesPending
   780  	}
   781  	reqTimer.UpdateSince(request.Time)
   782  	delete(pendPool, id)
   783  
   784  	// If no data items were retrieved, mark them as unavailable for the origin peer
   785  	if results == 0 {
   786  		for _, header := range request.Headers {
   787  			request.Peer.MarkLacking(header.Hash())
   788  		}
   789  	}
   790  	// Assemble each of the results with their headers and retrieved data parts
   791  	var (
   792  		accepted int
   793  		failure  error
   794  		i        int
   795  		hashes   []common.Hash
   796  	)
   797  	for _, header := range request.Headers {
   798  		// Short circuit assembly if no more fetch results are found
   799  		if i >= results {
   800  			break
   801  		}
   802  		// Validate the fields
   803  		if err := validate(i, header); err != nil {
   804  			failure = err
   805  			break
   806  		}
   807  		hashes = append(hashes, header.Hash())
   808  		i++
   809  	}
   810  
   811  	for _, header := range request.Headers[:i] {
   812  		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number().Uint64()); err == nil {
   813  			reconstruct(accepted, res)
   814  		} else {
    815  			// Between here and the check above, some other peer filled this
    816  			// result, or it was indeed a no-op. This should not happen, but
    817  			// if it does it's not something to panic about.
   818  			log.Error("Delivery stale", "stale", stale, "number", header.Number().Uint64(), "err", err)
   819  			failure = errStaleDelivery
   820  		}
   821  		// Clean up a successful fetch
   822  		delete(taskPool, hashes[accepted])
   823  		accepted++
   824  	}
   825  	// Return all failed or missing fetches to the queue
   826  	for _, header := range request.Headers[accepted:] {
   827  		taskQueue.Push(header, -int64(header.Number().Uint64()))
   828  	}
   829  	// Wake up Results
   830  	if accepted > 0 {
   831  		q.active.Signal()
   832  	}
   833  	if failure == nil {
   834  		return accepted, nil
   835  	}
    836  	if accepted > 0 {
    837  		return accepted, fmt.Errorf("partial failure: %v", failure)
    838  	}
    839  	// If none of the data was good, it's a stale delivery
    840  	return accepted, fmt.Errorf("%w: %v", errStaleDelivery, failure)
   841  }
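
// Callers of deliver therefore see one of three outcomes: (n, nil) for a
// fully clean delivery, (n > 0, "partial failure: ...") when only a prefix
// of the headers validated, and (0, err) wrapping errStaleDelivery when
// nothing was usable.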
   842  
   843  // Prepare configures the result cache to allow accepting and caching inbound
   844  // fetch results.
   845  func (q *queue) Prepare(offset uint64, mode SyncMode) {
   846  	q.lock.Lock()
   847  	defer q.lock.Unlock()
   848  
   849  	// Prepare the queue for sync results
   850  	q.resultCache.Prepare(offset)
   851  	q.mode = mode
   852  }