github.com/klaytn/klaytn@v1.12.1/datasync/downloader/queue.go

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/downloader/queue.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package downloader
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/klaytn/klaytn/blockchain/types"
    31  	"github.com/klaytn/klaytn/common"
    32  	"github.com/klaytn/klaytn/common/prque"
    33  	"github.com/klaytn/klaytn/consensus/istanbul"
    34  	klaytnmetrics "github.com/klaytn/klaytn/metrics"
    35  	"github.com/klaytn/klaytn/params"
    36  	"github.com/klaytn/klaytn/reward"
    37  	"github.com/rcrowley/go-metrics"
    38  )
    39  
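         // The constants below are bit positions within fetchResult.pending: an
         // outstanding body, receipt or staking info delivery is tracked as
         // (1 << bodyType), (1 << receiptType) or (1 << stakingInfoType) respectively.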
    40  const (
    41  	bodyType        = uint(0)
    42  	receiptType     = uint(1)
    43  	stakingInfoType = uint(2)
    44  )
    45  
    46  var (
    47  	blockCacheMaxItems     = 8192             // Maximum number of blocks to cache before throttling the download
    48  	blockCacheInitialItems = 2048             // Initial number of blocks to start fetching, before we know the sizes of the blocks
    49  	blockCacheMemory       = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
    50  	blockCacheSizeWeight   = 0.1              // Multiplier to approximate the average block size based on past ones
    51  )
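         // blockCacheMemory and blockCacheSizeWeight together bound the result cache:
         // Results maintains an exponential moving average of the downloaded block size
         // and throttles scheduling to roughly blockCacheMemory / resultSize cached items.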
    52  
    53  var (
    54  	errNoFetchesPending = errors.New("no fetches pending")
    55  	errStaleDelivery    = errors.New("stale delivery")
    56  )
    57  
    58  // fetchRequest is a currently running data retrieval operation.
    59  type fetchRequest struct {
    60  	Peer    *peerConnection // Peer to which the request was sent
    61  	From    uint64          // [klay/62] Requested chain element index (used for skeleton fills only)
    62  	Headers []*types.Header // [klay/62] Requested headers, sorted by request order
    63  	Time    time.Time       // Time when the request was made
    64  }
    65  
    66  // fetchResult is a struct collecting partial results from data fetchers until
    67  // all outstanding pieces complete and the result as a whole can be processed.
    68  type fetchResult struct {
     69  	pending int32 // Bit flags telling which deliveries are outstanding
    70  
    71  	Header       *types.Header
    72  	Transactions types.Transactions
    73  	Receipts     types.Receipts
    74  	StakingInfo  *reward.StakingInfo
    75  }
    76  
    77  func newFetchResult(header *types.Header, mode SyncMode, proposerPolicy uint64) *fetchResult {
    78  	var (
    79  		fastSync = mode == FastSync
    80  		snapSync = mode == SnapSync
    81  	)
    82  	item := &fetchResult{
    83  		Header: header,
    84  	}
    85  	if !header.EmptyBody() {
    86  		item.pending |= (1 << bodyType)
    87  	}
    88  	if (fastSync || snapSync) && !header.EmptyReceipts() {
    89  		item.pending |= (1 << receiptType)
    90  	}
    91  	if (fastSync || snapSync) && proposerPolicy == uint64(istanbul.WeightedRandom) && params.IsStakingUpdateInterval(header.Number.Uint64()) {
    92  		item.pending |= (1 << stakingInfoType)
    93  	}
    94  	return item
    95  }
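         // A header with an empty body (and, in fast/snap sync, empty receipts and no
         // staking info requirement) yields pending == 0, i.e. the result is complete
         // as soon as it is created.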
    96  
    97  // SetBodyDone flags the body as finished.
    98  func (f *fetchResult) SetBodyDone() {
    99  	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
   100  		atomic.AddInt32(&f.pending, -(1 << bodyType))
   101  	}
   102  }
   103  
   104  // AllDone checks if item is done.
   105  func (f *fetchResult) AllDone() bool {
   106  	return atomic.LoadInt32(&f.pending) == 0
   107  }
   108  
   109  // SetReceiptsDone flags the receipts as finished.
   110  func (f *fetchResult) SetReceiptsDone() {
   111  	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
   112  		atomic.AddInt32(&f.pending, -(1 << receiptType))
   113  	}
   114  }
   115  
    116  // SetStakingInfoDone flags the staking info as finished.
   117  func (f *fetchResult) SetStakingInfoDone() {
   118  	if v := atomic.LoadInt32(&f.pending); (v & (1 << stakingInfoType)) != 0 {
   119  		atomic.AddInt32(&f.pending, -(1 << stakingInfoType))
   120  	}
   121  }
   122  
   123  // Done checks if the given type is done already
   124  func (f *fetchResult) Done(kind uint) bool {
   125  	v := atomic.LoadInt32(&f.pending)
   126  	return v&(1<<kind) == 0
   127  }
   128  
    129  // queue represents hashes that either need fetching or are being fetched
   130  type queue struct {
   131  	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
   132  
   133  	// Headers are "special", they download in batches, supported by a skeleton chain
   134  	headerHead      common.Hash                    // [klay/62] Hash of the last queued header to verify order
   135  	headerTaskPool  map[uint64]*types.Header       // [klay/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
   136  	headerTaskQueue *prque.Prque                   // [klay/62] Priority queue of the skeleton indexes to fetch the filling headers for
   137  	headerPeerMiss  map[string]map[uint64]struct{} // [klay/62] Set of per-peer header batches known to be unavailable
   138  	headerPendPool  map[string]*fetchRequest       // [klay/62] Currently pending header retrieval operations
   139  	headerResults   []*types.Header                // [klay/62] Result cache accumulating the completed headers
   140  	headerProced    int                            // [klay/62] Number of headers already processed from the results
   141  	headerOffset    uint64                         // [klay/62] Number of the first header in the result cache
   142  	headerContCh    chan bool                      // [klay/62] Channel to notify when header download finishes
   143  
    144  	// All data retrievals below are based on an already assembled header chain
   145  	blockTaskPool  map[common.Hash]*types.Header // [klay/62] Pending block (body) retrieval tasks, mapping hashes to headers
   146  	blockTaskQueue *prque.Prque                  // [klay/62] Priority queue of the headers to fetch the blocks (bodies) for
   147  	blockPendPool  map[string]*fetchRequest      // [klay/62] Currently pending block (body) retrieval operations
   148  
   149  	receiptTaskPool  map[common.Hash]*types.Header // [klay/63] Pending receipt retrieval tasks, mapping hashes to headers
   150  	receiptTaskQueue *prque.Prque                  // [klay/63] Priority queue of the headers to fetch the receipts for
   151  	receiptPendPool  map[string]*fetchRequest      // [klay/63] Currently pending receipt retrieval operations
   152  
   153  	stakingInfoTaskPool  map[common.Hash]*types.Header // [klay/65] Pending staking info retrieval tasks, mapping hashes to headers
   154  	stakingInfoTaskQueue *prque.Prque                  // [klay/65] Priority queue of the headers to fetch the staking infos for
   155  	stakingInfoPendPool  map[string]*fetchRequest      // [klay/65] Currently pending staking info retrieval operations
   156  
   157  	resultCache *resultStore       // Downloaded but not yet delivered fetch results
   158  	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)
   159  
   160  	lock   *sync.RWMutex
   161  	active *sync.Cond
   162  	closed bool
   163  
   164  	proposerPolicy uint64
   165  
   166  	lastStatLog time.Time
   167  }
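         // All queue state is guarded by lock; active is signalled whenever completed
         // results become available (or the queue is closed) so that a Results call
         // blocked on it can wake up.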
   168  
   169  // newQueue creates a new download queue for scheduling block retrieval.
   170  func newQueue(blockCacheLimit int, thresholdInitialSize int, proposerPolicy uint64) *queue {
   171  	lock := new(sync.RWMutex)
   172  	q := &queue{
   173  		headerContCh:         make(chan bool),
   174  		blockTaskQueue:       prque.New(),
   175  		receiptTaskQueue:     prque.New(),
   176  		stakingInfoTaskQueue: prque.New(),
   177  		active:               sync.NewCond(lock),
   178  		lock:                 lock,
   179  		proposerPolicy:       proposerPolicy,
   180  	}
   181  	q.Reset(blockCacheLimit, thresholdInitialSize)
   182  	return q
   183  }
   184  
   185  // Reset clears out the queue contents.
   186  func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
   187  	q.lock.Lock()
   188  	defer q.lock.Unlock()
   189  
   190  	q.closed = false
   191  	q.mode = FullSync
   192  
   193  	q.headerHead = common.Hash{}
   194  	q.headerPendPool = make(map[string]*fetchRequest)
   195  
   196  	q.blockTaskPool = make(map[common.Hash]*types.Header)
   197  	q.blockTaskQueue.Reset()
   198  	q.blockPendPool = make(map[string]*fetchRequest)
   199  
   200  	q.receiptTaskPool = make(map[common.Hash]*types.Header)
   201  	q.receiptTaskQueue.Reset()
   202  	q.receiptPendPool = make(map[string]*fetchRequest)
   203  
   204  	q.stakingInfoTaskPool = make(map[common.Hash]*types.Header)
   205  	q.stakingInfoTaskQueue.Reset()
   206  	q.stakingInfoPendPool = make(map[string]*fetchRequest)
   207  
   208  	q.resultCache = newResultStore(blockCacheLimit)
   209  	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
   210  }
   211  
    212  // Close marks the end of the sync, unblocking Results.
   213  // It may be called even if the queue is already closed.
   214  func (q *queue) Close() {
   215  	q.lock.Lock()
   216  	q.closed = true
   217  	q.active.Signal()
   218  	q.lock.Unlock()
   219  }
   220  
   221  // PendingHeaders retrieves the number of header requests pending for retrieval.
   222  func (q *queue) PendingHeaders() int {
   223  	q.lock.Lock()
   224  	defer q.lock.Unlock()
   225  
   226  	return q.headerTaskQueue.Size()
   227  }
   228  
   229  // PendingBlocks retrieves the number of block (body) requests pending for retrieval.
   230  func (q *queue) PendingBlocks() int {
   231  	q.lock.Lock()
   232  	defer q.lock.Unlock()
   233  
   234  	return q.blockTaskQueue.Size()
   235  }
   236  
   237  // PendingReceipts retrieves the number of block receipts pending for retrieval.
   238  func (q *queue) PendingReceipts() int {
   239  	q.lock.Lock()
   240  	defer q.lock.Unlock()
   241  
   242  	return q.receiptTaskQueue.Size()
   243  }
   244  
    245  // PendingStakingInfos retrieves the number of staking infos pending for retrieval.
   246  func (q *queue) PendingStakingInfos() int {
   247  	q.lock.Lock()
   248  	defer q.lock.Unlock()
   249  
   250  	return q.stakingInfoTaskQueue.Size()
   251  }
   252  
   253  // InFlightHeaders retrieves whether there are header fetch requests currently
   254  // in flight.
   255  func (q *queue) InFlightHeaders() bool {
   256  	q.lock.Lock()
   257  	defer q.lock.Unlock()
   258  
   259  	return len(q.headerPendPool) > 0
   260  }
   261  
   262  // InFlightBlocks retrieves whether there are block fetch requests currently in
   263  // flight.
   264  func (q *queue) InFlightBlocks() bool {
   265  	q.lock.Lock()
   266  	defer q.lock.Unlock()
   267  
   268  	return len(q.blockPendPool) > 0
   269  }
   270  
   271  // InFlightReceipts retrieves whether there are receipt fetch requests currently
   272  // in flight.
   273  func (q *queue) InFlightReceipts() bool {
   274  	q.lock.Lock()
   275  	defer q.lock.Unlock()
   276  
   277  	return len(q.receiptPendPool) > 0
   278  }
   279  
   280  // InFlightStakingInfos retrieves whether there are staking info fetch requests currently
   281  // in flight.
   282  func (q *queue) InFlightStakingInfos() bool {
   283  	q.lock.Lock()
   284  	defer q.lock.Unlock()
   285  
   286  	return len(q.stakingInfoPendPool) > 0
   287  }
   288  
    289  // Idle returns whether the queue is fully idle or has some data still inside.
   290  func (q *queue) Idle() bool {
   291  	q.lock.Lock()
   292  	defer q.lock.Unlock()
   293  
   294  	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stakingInfoTaskQueue.Size()
   295  	pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.stakingInfoPendPool)
   296  
   297  	return (queued + pending) == 0
   298  }
   299  
   300  // ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
   301  // up an already retrieved header skeleton.
   302  func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
   303  	q.lock.Lock()
   304  	defer q.lock.Unlock()
   305  
   306  	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
   307  	if q.headerResults != nil {
   308  		panic("skeleton assembly already in progress")
   309  	}
   310  	// Schedule all the header retrieval tasks for the skeleton assembly
   311  	q.headerTaskPool = make(map[uint64]*types.Header)
   312  	q.headerTaskQueue = prque.New()
   313  	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
   314  	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
   315  	q.headerProced = 0
   316  	q.headerOffset = from
   317  	q.headerContCh = make(chan bool, 1)
   318  
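         	// Tasks are pushed with the negated index as priority, so the earliest
         	// (lowest numbered) skeleton gap is popped and filled first.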
   319  	for i, header := range skeleton {
   320  		index := from + uint64(i*MaxHeaderFetch)
   321  
   322  		q.headerTaskPool[index] = header
   323  		q.headerTaskQueue.Push(index, -int64(index))
   324  	}
   325  }
   326  
    327  // RetrieveHeaders retrieves the header chain assembled based on the scheduled
   328  // skeleton.
   329  func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
   330  	q.lock.Lock()
   331  	defer q.lock.Unlock()
   332  
   333  	headers, proced := q.headerResults, q.headerProced
   334  	q.headerResults, q.headerProced = nil, 0
   335  
   336  	return headers, proced
   337  }
   338  
    339  // Schedule adds a set of headers to the download queue for scheduling, returning
   340  // the new headers encountered.
   341  func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
   342  	q.lock.Lock()
   343  	defer q.lock.Unlock()
   344  
   345  	// Insert all the headers prioritised by the contained block number
   346  	inserts := make([]*types.Header, 0, len(headers))
   347  	for _, header := range headers {
   348  		// Make sure chain order is honoured and preserved throughout
   349  		hash := header.Hash()
   350  		if header.Number == nil || header.Number.Uint64() != from {
   351  			logger.Trace("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
   352  			break
   353  		}
   354  		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
   355  			logger.Trace("Header broke chain ancestry", "number", header.Number, "hash", hash)
   356  			break
   357  		}
   358  		// Make sure no duplicate requests are executed
   359  		// We cannot skip this, even if the block is empty, since this is
   360  		// what triggers the fetchResult creation.
   361  		if _, ok := q.blockTaskPool[hash]; ok {
   362  			logger.Trace("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
   363  		} else {
   364  			q.blockTaskPool[hash] = header
   365  			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
   366  		}
   367  		// Queue for receipt retrieval
   368  		if (q.mode == FastSync || q.mode == SnapSync) && !header.EmptyReceipts() {
   369  			if _, ok := q.receiptTaskPool[hash]; ok {
   370  				logger.Trace("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
   371  			} else {
   372  				q.receiptTaskPool[hash] = header
   373  				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
   374  			}
   375  		}
   376  
   377  		if (q.mode == FastSync || q.mode == SnapSync) && q.proposerPolicy == uint64(istanbul.WeightedRandom) && params.IsStakingUpdateInterval(header.Number.Uint64()) {
   378  			if _, ok := q.stakingInfoTaskPool[hash]; ok {
   379  				logger.Trace("Header already scheduled for staking info fetch", "number", header.Number, "hash", hash)
   380  			} else {
   381  				q.stakingInfoTaskPool[hash] = header
   382  				q.stakingInfoTaskQueue.Push(header, -int64(header.Number.Uint64()))
   383  			}
   384  		}
   385  		inserts = append(inserts, header)
   386  		q.headerHead = hash
   387  		from++
   388  	}
   389  	return inserts
   390  }
   391  
   392  // Results retrieves and permanently removes a batch of fetch results from
   393  // the cache. The result slice will be empty if the queue has been closed.
   394  // Results can be called concurrently with Deliver and Schedule,
   395  // but assumes that there are not two simultaneous callers to Results
   396  func (q *queue) Results(block bool) []*fetchResult {
   397  	// Abort early if there are no items and non-blocking requested
   398  	if !block && !q.resultCache.HasCompletedItems() {
   399  		return nil
   400  	}
   401  	closed := false
   402  	for !closed && !q.resultCache.HasCompletedItems() {
   403  		// In order to wait on 'active', we need to obtain the lock.
   404  		// That may take a while, if someone is delivering at the same
   405  		// time, so after obtaining the lock, we check again if there
   406  		// are any results to fetch.
    407  		// Also, in between asking for the lock and obtaining it,
   408  		// someone can have closed the queue. In that case, we should
   409  		// return the available results and stop blocking
   410  		q.lock.Lock()
   411  		if q.resultCache.HasCompletedItems() || q.closed {
   412  			q.lock.Unlock()
   413  			break
   414  		}
   415  		// No items available, and not closed
   416  		q.active.Wait()
   417  		closed = q.closed
   418  		q.lock.Unlock()
   419  	}
    420  	// Regardless of whether the queue is closed or not, we can still deliver whatever we have
   421  	results := q.resultCache.GetCompleted(maxResultsProcess)
   422  	for _, result := range results {
   423  		// Recalculate the result item weights to prevent memory exhaustion
   424  		size := result.Header.Size()
   425  		for _, receipt := range result.Receipts {
   426  			size += receipt.Size()
   427  		}
   428  		for _, tx := range result.Transactions {
   429  			size += tx.Size()
   430  		}
   431  		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
   432  			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
   433  	}
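         	// resultSize is an exponential moving average: each processed result above
         	// contributes blockCacheSizeWeight of its size and the previous average the
         	// rest. The throttle threshold below is then roughly the number of
         	// average-sized results that fit into blockCacheMemory.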
   434  	// Using the newly calibrated resultSize, figure out the new throttle limit
   435  	// on the result cache
   436  	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
   437  	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)
   438  
   439  	// Log some info at certain times
   440  	if time.Since(q.lastStatLog) > 60*time.Second {
   441  		q.lastStatLog = time.Now()
   442  		info := q.Stats()
   443  		info = append(info, "throttle", throttleThreshold)
   444  		logger.Info("Downloader queue stats", info...)
   445  	}
   446  	return results
   447  }
   448  
   449  func (q *queue) Stats() []interface{} {
   450  	q.lock.RLock()
   451  	defer q.lock.RUnlock()
   452  
   453  	return q.stats()
   454  }
   455  
   456  func (q *queue) stats() []interface{} {
   457  	return []interface{}{
   458  		"receiptTasks", q.receiptTaskQueue.Size(),
   459  		"blockTasks", q.blockTaskQueue.Size(),
   460  		"stakingInfoTasks", q.stakingInfoTaskQueue.Size(),
   461  		"itemSize", q.resultSize,
   462  	}
   463  }
   464  
   465  // ReserveHeaders reserves a set of headers for the given peer, skipping any
   466  // previously failed batches.
   467  func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
   468  	q.lock.Lock()
   469  	defer q.lock.Unlock()
   470  
   471  	// Short circuit if the peer's already downloading something (sanity check to
   472  	// not corrupt state)
   473  	if _, ok := q.headerPendPool[p.id]; ok {
   474  		return nil
   475  	}
   476  	// Retrieve a batch of hashes, skipping previously failed ones
   477  	send, skip := uint64(0), []uint64{}
   478  	for send == 0 && !q.headerTaskQueue.Empty() {
   479  		from, _ := q.headerTaskQueue.Pop()
   480  		if q.headerPeerMiss[p.id] != nil {
   481  			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
   482  				skip = append(skip, from.(uint64))
   483  				continue
   484  			}
   485  		}
   486  		send = from.(uint64)
   487  	}
   488  	// Merge all the skipped batches back
   489  	for _, from := range skip {
   490  		q.headerTaskQueue.Push(from, -int64(from))
   491  	}
   492  	// Assemble and return the block download request
   493  	if send == 0 {
   494  		return nil
   495  	}
   496  	request := &fetchRequest{
   497  		Peer: p,
   498  		From: send,
   499  		Time: time.Now(),
   500  	}
   501  	q.headerPendPool[p.id] = request
   502  	return request
   503  }
   504  
   505  // ReserveBodies reserves a set of body fetches for the given peer, skipping any
    506  // previously failed downloads. Besides the next batch of needed fetches, it also
   507  // returns a flag whether empty blocks were queued requiring processing.
   508  func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
   509  	q.lock.Lock()
   510  	defer q.lock.Unlock()
   511  
   512  	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
   513  }
   514  
   515  // ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
    516  // any previously failed downloads. Besides the next batch of needed fetches, it
   517  // also returns a flag whether empty receipts were queued requiring importing.
   518  func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
   519  	q.lock.Lock()
   520  	defer q.lock.Unlock()
   521  
   522  	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
   523  }
   524  
   525  // ReserveStakingInfos reserves a set of staking info fetches for the given peer, skipping
    526  // any previously failed downloads. Besides the next batch of needed fetches, it
    527  // also returns a flag whether empty staking infos were queued requiring importing.
   528  func (q *queue) ReserveStakingInfos(p *peerConnection, count int) (*fetchRequest, bool, bool) {
   529  	q.lock.Lock()
   530  	defer q.lock.Unlock()
   531  
   532  	return q.reserveHeaders(p, count, q.stakingInfoTaskPool, q.stakingInfoTaskQueue, q.stakingInfoPendPool, stakingInfoType)
   533  }
   534  
   535  // reserveHeaders reserves a set of data download operations for a given peer,
   536  // skipping any previously failed ones. This method is a generic version used
   537  // by the individual special reservation functions.
   538  //
   539  // Note, this method expects the queue lock to be already held for writing. The
   540  // reason the lock is not obtained in here is because the parameters already need
   541  // to access the queue, so they already need a lock anyway.
   542  //
   543  // Returns:
   544  //   item     - the fetchRequest
   545  //   progress - whether any progress was made
   546  //   throttle - if the caller should throttle for a while
   547  func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
   548  	pendPool map[string]*fetchRequest, kind uint,
   549  ) (*fetchRequest, bool, bool) {
   550  	// Short circuit if the pool has been depleted, or if the peer's already
   551  	// downloading something (sanity check not to corrupt state)
   552  	if taskQueue.Empty() {
   553  		return nil, false, true
   554  	}
   555  	if _, ok := pendPool[p.id]; ok {
   556  		return nil, false, false
   557  	}
   558  
   559  	// Retrieve a batch of tasks, skipping previously failed ones
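         	// send collects the headers to request from this peer; skip collects headers
         	// the peer is known to lack, which are pushed back below for other peers to fetch.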
   560  	send := make([]*types.Header, 0, count)
   561  	skip := make([]*types.Header, 0)
   562  
   563  	progress := false
   564  	throttled := false
   565  	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
    566  		// The task queue pops items in priority order, so the highest-priority
    567  		// block is also the one with the lowest block number.
   568  		h, _ := taskQueue.Peek()
   569  		header := h.(*types.Header)
   570  		// we can ask the resultCache if this header is within the
   571  		// "prioritized" segment of blocks. If it is not, we need to throttle
   572  		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode, q.proposerPolicy)
   573  		if stale {
   574  			// Don't put back in the task queue, this item has already been
   575  			// delivered upstream
   576  			taskQueue.PopItem()
   577  			progress = true
   578  			delete(taskPool, header.Hash())
   579  			proc = proc - 1
   580  			logger.Debug("Fetch reservation already delivered", "number", header.Number.Uint64())
   581  			continue
   582  		}
   583  		if throttle {
   584  			// There are no result slots available. Leave it in the task queue
   585  			// However, if there are any left as 'skipped', we should not tell
   586  			// the caller to throttle, since we still want some other
   587  			// peer to fetch those for us
   588  			throttled = len(skip) == 0
   589  			break
   590  		}
   591  		if err != nil {
   592  			// this most definitely should _not_ happen
   593  			logger.Warn("Failed to reserve headers", "err", err)
   594  			break
   595  		}
   596  		if item.Done(kind) {
   597  			// If it's a noop, we can skip this task
   598  			delete(taskPool, header.Hash())
   599  			taskQueue.PopItem()
   600  			proc = proc - 1
   601  			progress = true
   602  			continue
   603  		}
   604  		// Remove it from the task queue
   605  		taskQueue.PopItem()
    606  		// Otherwise, unless the peer is known not to have the data, add it to the retrieve list
   607  		if p.Lacks(header.Hash()) {
   608  			skip = append(skip, header)
   609  		} else {
   610  			send = append(send, header)
   611  		}
   612  	}
   613  	// Merge all the skipped headers back
   614  	for _, header := range skip {
   615  		taskQueue.Push(header, -int64(header.Number.Uint64()))
   616  	}
   617  	if q.resultCache.HasCompletedItems() {
   618  		// Wake Results, resultCache was modified
   619  		q.active.Signal()
   620  	}
   621  	// Assemble and return the block download request
   622  	if len(send) == 0 {
   623  		return nil, progress, throttled
   624  	}
   625  	request := &fetchRequest{
   626  		Peer:    p,
   627  		Headers: send,
   628  		Time:    time.Now(),
   629  	}
   630  	pendPool[p.id] = request
   631  	return request, progress, throttled
   632  }
   633  
   634  // CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
   635  func (q *queue) CancelHeaders(request *fetchRequest) {
   636  	q.lock.Lock()
   637  	defer q.lock.Unlock()
   638  	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
   639  }
   640  
   641  // CancelBodies aborts a body fetch request, returning all pending headers to the
   642  // task queue.
   643  func (q *queue) CancelBodies(request *fetchRequest) {
   644  	q.lock.Lock()
   645  	defer q.lock.Unlock()
   646  	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
   647  }
   648  
    649  // CancelReceipts aborts a receipt fetch request, returning all pending headers to
   650  // the task queue.
   651  func (q *queue) CancelReceipts(request *fetchRequest) {
   652  	q.lock.Lock()
   653  	defer q.lock.Unlock()
   654  	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
   655  }
   656  
    657  // CancelStakingInfo aborts a staking info fetch request, returning all pending headers to
   658  // the task queue.
   659  func (q *queue) CancelStakingInfo(request *fetchRequest) {
   660  	q.lock.Lock()
   661  	defer q.lock.Unlock()
   662  	q.cancel(request, q.stakingInfoTaskQueue, q.stakingInfoPendPool)
   663  }
   664  
   665  // Cancel aborts a fetch request, returning all pending hashes to the task queue.
   666  func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
   667  	if request.From > 0 {
   668  		taskQueue.Push(request.From, -int64(request.From))
   669  	}
   670  	for _, header := range request.Headers {
   671  		taskQueue.Push(header, -int64(header.Number.Uint64()))
   672  	}
   673  	delete(pendPool, request.Peer.id)
   674  }
   675  
   676  // Revoke cancels all pending requests belonging to a given peer. This method is
   677  // meant to be called during a peer drop to quickly reassign owned data fetches
   678  // to remaining nodes.
   679  func (q *queue) Revoke(peerId string) {
   680  	q.lock.Lock()
   681  	defer q.lock.Unlock()
   682  
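         	// Note: in-flight header (skeleton) requests are not revoked here; those are
         	// returned to the task queue via CancelHeaders or ExpireHeaders instead.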
   683  	if request, ok := q.blockPendPool[peerId]; ok {
   684  		for _, header := range request.Headers {
   685  			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
   686  		}
   687  		delete(q.blockPendPool, peerId)
   688  	}
   689  	if request, ok := q.receiptPendPool[peerId]; ok {
   690  		for _, header := range request.Headers {
   691  			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
   692  		}
   693  		delete(q.receiptPendPool, peerId)
   694  	}
   695  	if request, ok := q.stakingInfoPendPool[peerId]; ok {
   696  		for _, header := range request.Headers {
   697  			q.stakingInfoTaskQueue.Push(header, -int64(header.Number.Uint64()))
   698  		}
   699  		delete(q.stakingInfoPendPool, peerId)
   700  	}
   701  }
   702  
   703  // ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
   704  // canceling them and returning the responsible peers for penalisation.
   705  func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
   706  	q.lock.Lock()
   707  	defer q.lock.Unlock()
   708  
   709  	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
   710  }
   711  
   712  // ExpireBodies checks for in flight block body requests that exceeded a timeout
   713  // allowance, canceling them and returning the responsible peers for penalisation.
   714  func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
   715  	q.lock.Lock()
   716  	defer q.lock.Unlock()
   717  
   718  	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
   719  }
   720  
   721  // ExpireReceipts checks for in flight receipt requests that exceeded a timeout
   722  // allowance, canceling them and returning the responsible peers for penalisation.
   723  func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
   724  	q.lock.Lock()
   725  	defer q.lock.Unlock()
   726  
   727  	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
   728  }
   729  
   730  // ExpireStakingInfos checks for in flight staking info requests that exceeded a timeout
   731  // allowance, canceling them and returning the responsible peers for penalisation.
   732  func (q *queue) ExpireStakingInfos(timeout time.Duration) map[string]int {
   733  	q.lock.Lock()
   734  	defer q.lock.Unlock()
   735  
   736  	return q.expire(timeout, q.stakingInfoPendPool, q.stakingInfoTaskQueue, stakingInfoTimeoutMeter)
   737  }
   738  
    739  // expire is the generic check that moves expired tasks from a pending pool back
   740  // into a task pool, returning all entities caught with expired tasks.
   741  //
   742  // Note, this method expects the queue lock to be already held. The
   743  // reason the lock is not obtained in here is because the parameters already need
   744  // to access the queue, so they already need a lock anyway.
   745  func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
   746  	// Iterate over the expired requests and return each to the queue
   747  	expiries := make(map[string]int)
   748  	for id, request := range pendPool {
   749  		if time.Since(request.Time) > timeout {
   750  			// Update the metrics with the timeout
   751  			timeoutMeter.Mark(1)
   752  
   753  			// Return any non satisfied requests to the pool
   754  			if request.From > 0 {
   755  				taskQueue.Push(request.From, -int64(request.From))
   756  			}
   757  			for _, header := range request.Headers {
   758  				taskQueue.Push(header, -int64(header.Number.Uint64()))
   759  			}
   760  			// Add the peer to the expiry report along the number of failed requests
   761  			expiries[id] = len(request.Headers)
   762  
   763  			// Remove the expired requests from the pending pool
   764  			delete(pendPool, id)
   765  		}
   766  	}
   767  	return expiries
   768  }
   769  
   770  // DeliverHeaders injects a header retrieval response into the header results
   771  // cache. This method either accepts all headers it received, or none of them
   772  // if they do not map correctly to the skeleton.
   773  //
   774  // If the headers are accepted, the method makes an attempt to deliver the set
   775  // of ready headers to the processor to keep the pipeline full. However it will
   776  // not block to prevent stalling other pending deliveries.
   777  func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
   778  	q.lock.Lock()
   779  	defer q.lock.Unlock()
   780  
   781  	// Short circuit if the data was never requested
   782  	request := q.headerPendPool[id]
   783  	if request == nil {
   784  		return 0, errNoFetchesPending
   785  	}
   786  	headerReqTimer.Update(time.Since(request.Time))
   787  	delete(q.headerPendPool, id)
   788  
   789  	// Ensure headers can be mapped onto the skeleton chain
   790  	target := q.headerTaskPool[request.From].Hash()
   791  
   792  	accepted := len(headers) == MaxHeaderFetch
   793  	if accepted {
   794  		if headers[0].Number.Uint64() != request.From {
    795  			logger.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
   796  			accepted = false
   797  		} else if headers[len(headers)-1].Hash() != target {
   798  			logger.Trace("Last header broke skeleton structure ", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
   799  			accepted = false
   800  		}
   801  	}
   802  	if accepted {
   803  		parentHash := headers[0].Hash()
   804  		for i, header := range headers[1:] {
   805  			hash := header.Hash()
   806  			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
   807  				logger.Trace("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
   808  				accepted = false
   809  				break
   810  			}
   811  			if parentHash != header.ParentHash {
   812  				logger.Trace("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
   813  				accepted = false
   814  				break
   815  			}
   816  			// Set-up parent hash for next round
   817  			parentHash = hash
   818  		}
   819  	}
   820  	// If the batch of headers wasn't accepted, mark as unavailable
   821  	if !accepted {
   822  		logger.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)
   823  
   824  		miss := q.headerPeerMiss[id]
   825  		if miss == nil {
   826  			q.headerPeerMiss[id] = make(map[uint64]struct{})
   827  			miss = q.headerPeerMiss[id]
   828  		}
   829  		miss[request.From] = struct{}{}
   830  
   831  		q.headerTaskQueue.Push(request.From, -int64(request.From))
   832  		return 0, errors.New("delivery not accepted")
   833  	}
   834  	// Clean up a successful fetch and try to deliver any sub-results
   835  	copy(q.headerResults[request.From-q.headerOffset:], headers)
   836  	delete(q.headerTaskPool, request.From)
   837  
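         	// The accepted batch has been copied into its slot of headerResults above;
         	// now count how many contiguous MaxHeaderFetch-sized batches beyond
         	// headerProced have been filled and can be handed to the processor.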
   838  	ready := 0
   839  	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
   840  		ready += MaxHeaderFetch
   841  	}
   842  	if ready > 0 {
   843  		// Headers are ready for delivery, gather them and push forward (non blocking)
   844  		process := make([]*types.Header, ready)
   845  		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])
   846  
   847  		select {
   848  		case headerProcCh <- process:
   849  			logger.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
   850  			q.headerProced += len(process)
   851  		default:
   852  		}
   853  	}
   854  	// Check for termination and return
   855  	if len(q.headerTaskPool) == 0 {
   856  		q.headerContCh <- false
   857  	}
   858  	return len(headers), nil
   859  }
   860  
   861  // DeliverBodies injects a block body retrieval response into the results queue.
    862  // The method returns the number of block bodies accepted from the delivery and
   863  // also wakes any threads waiting for data delivery.
   864  func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction) (int, error) {
   865  	q.lock.Lock()
   866  	defer q.lock.Unlock()
   867  
   868  	validate := func(index int, header *types.Header) error {
   869  		if types.DeriveSha(types.Transactions(txLists[index]), header.Number) != header.TxHash {
   870  			return errInvalidBody
   871  		}
   872  		return nil
   873  	}
   874  
   875  	reconstruct := func(index int, result *fetchResult) {
   876  		result.Transactions = txLists[index]
   877  		result.SetBodyDone()
   878  	}
   879  	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyReqTimer, len(txLists), validate, reconstruct)
   880  }
   881  
   882  // DeliverReceipts injects a receipt retrieval response into the results queue.
   883  // The method returns the number of transaction receipts accepted from the delivery
   884  // and also wakes any threads waiting for data delivery.
   885  func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
   886  	q.lock.Lock()
   887  	defer q.lock.Unlock()
   888  	validate := func(index int, header *types.Header) error {
   889  		if types.DeriveSha(types.Receipts(receiptList[index]), header.Number) != header.ReceiptHash {
   890  			return errInvalidReceipt
   891  		}
   892  		return nil
   893  	}
   894  
   895  	reconstruct := func(index int, result *fetchResult) {
   896  		result.Receipts = receiptList[index]
   897  		result.SetReceiptsDone()
   898  	}
   899  	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptReqTimer, len(receiptList), validate, reconstruct)
   900  }
   901  
    902  // DeliverStakingInfos injects a staking info retrieval response into the results queue.
    903  // The method returns the number of staking infos accepted from the delivery
   904  // and also wakes any threads waiting for data delivery.
   905  func (q *queue) DeliverStakingInfos(id string, stakingInfoList []*reward.StakingInfo) (int, error) {
   906  	q.lock.Lock()
   907  	defer q.lock.Unlock()
   908  	validate := func(index int, header *types.Header) error {
   909  		// TODO-Klaytn-Snapsync update validation logic
   910  		return nil
   911  	}
   912  
   913  	reconstruct := func(index int, result *fetchResult) {
   914  		result.StakingInfo = stakingInfoList[index]
   915  		result.SetStakingInfoDone()
   916  	}
   917  	return q.deliver(id, q.stakingInfoTaskPool, q.stakingInfoTaskQueue, q.stakingInfoPendPool, stakingInfoReqTimer, len(stakingInfoList), validate, reconstruct)
   918  }
   919  
   920  // deliver injects a data retrieval response into the results queue.
   921  //
   922  // Note, this method expects the queue lock to be already held for writing. The
   923  // reason this lock is not obtained in here is because the parameters already need
   924  // to access the queue, so they already need a lock anyway.
   925  func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
   926  	pendPool map[string]*fetchRequest, reqTimer klaytnmetrics.HybridTimer,
   927  	results int, validate func(index int, header *types.Header) error,
   928  	reconstruct func(index int, result *fetchResult),
   929  ) (int, error) {
   930  	// Short circuit if the data was never requested
   931  	request := pendPool[id]
   932  	if request == nil {
   933  		return 0, errNoFetchesPending
   934  	}
   935  	reqTimer.Update(time.Since(request.Time))
   936  	delete(pendPool, id)
   937  
   938  	// If no data items were retrieved, mark them as unavailable for the origin peer
   939  	if results == 0 {
   940  		for _, header := range request.Headers {
   941  			request.Peer.MarkLacking(header.Hash())
   942  		}
   943  	}
   944  	// Assemble each of the results with their headers and retrieved data parts
   945  	var (
   946  		accepted int
   947  		failure  error
   948  		i        int
   949  		hashes   []common.Hash
   950  	)
   951  	for _, header := range request.Headers {
   952  		// Short circuit assembly if no more fetch results are found
   953  		if i >= results {
   954  			break
   955  		}
   956  		// Validate the fields
   957  		if err := validate(i, header); err != nil {
   958  			failure = err
   959  			break
   960  		}
   961  		hashes = append(hashes, header.Hash())
   962  		i++
   963  	}
   964  
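         	// i now holds the number of responses that passed validation; match each
         	// validated header to its result slot by block number and fill in the data.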
   965  	for _, header := range request.Headers[:i] {
   966  		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
   967  			reconstruct(accepted, res)
   968  		} else {
   969  			// else: between here and above, some other peer filled this result,
   970  			// or it was indeed a no-op. This should not happen, but if it does it's
   971  			// not something to panic about
   972  			logger.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
   973  			failure = errStaleDelivery
   974  		}
   975  		// Clean up a successful fetch
   976  		delete(taskPool, hashes[accepted])
   977  		accepted++
   978  	}
   979  	// Return all failed or missing fetches to the queue
    980  	for _, header := range request.Headers[accepted:] {
   981  		taskQueue.Push(header, -int64(header.Number.Uint64()))
   982  	}
    983  	// Wake up Results
   984  	if accepted > 0 {
   985  		q.active.Signal()
   986  	}
   987  
   988  	// If none of the data was good, it's a stale delivery
   989  	if failure == nil {
   990  		return accepted, nil
   991  	}
   992  	if accepted > 0 {
   993  		return accepted, fmt.Errorf("partial failure: %v", failure)
   994  	}
   995  	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
   996  }
   997  
   998  // Prepare configures the result cache to allow accepting and caching inbound
   999  // fetch results.
  1000  func (q *queue) Prepare(offset uint64, mode SyncMode) {
  1001  	q.lock.Lock()
  1002  	defer q.lock.Unlock()
  1003  
  1004  	// Prepare the queue for sync results
  1005  	q.resultCache.Prepare(offset)
  1006  	q.mode = mode
  1007  }