github.com/annchain/OG@v0.0.9/og/downloader/queue.go

     1  package downloader
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	types2 "github.com/annchain/OG/arefactor/og/types"
     7  	"github.com/annchain/OG/common"
     8  	"github.com/annchain/OG/metrics"
     9  	"github.com/annchain/OG/og/protocol/dagmessage"
    10  	"github.com/annchain/OG/og/types"
    11  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    12  	"sync"
    13  	"time"
    14  )
    15  
    16  var (
    17  	blockCacheItems      = 8192             // Maximum number of blocks to cache before throttling the download
    18  	blockCacheMemory     = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
    19  	blockCacheSizeWeight = 0.1              // Multiplier to approximate the average block size based on past ones
    20  )
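
        // For illustration: with an average result size of around 32KiB, the 64MiB
        // memory budget caps the cache at roughly 2048 results, well below the
        // blockCacheItems limit of 8192; see resultSlots for how this cap is applied.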
    21  
    22  var (
    23  	errNoFetchesPending = errors.New("no fetches pending")
    24  	errStaleDelivery    = errors.New("stale delivery")
    25  )
    26  
    27  // fetchRequest is a currently running data retrieval operation.
    28  type fetchRequest struct {
    29  	Peer    *peerConnection               // Sender to which the request was sent
    30  	From    uint64                        // [og/01] Requested chain element index (used for skeleton fills only)
    31  	Headers []*dagmessage.SequencerHeader // [og/01] Requested headers, sorted by request order
    32  	Time    time.Time                     // Time when the request was made
    33  }
    34  
    35  // fetchResult is a struct collecting partial results from data fetchers until
    36  // all outstanding pieces complete and the result as a whole can be processed.
    37  type fetchResult struct {
    38  	Pending int         // Number of data fetches still pending
    39  	Hash    types2.Hash // Hash of the header to prevent recalculating
    40  
    41  	Header       *dagmessage.SequencerHeader
    42  	Transactions types.Txis
    43  	Sequencer    *types.Sequencer
    44  }
    45  
    46  // queue represents hashes that either need fetching or are being fetched
    47  type queue struct {
    48  	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
    49  
    50  	// Headers are "special", they download in batches, supported by a skeleton chain
    51  	headerHead      types2.Hash                            // [og/01] Hash of the last queued header to verify order
    52  	headerTaskPool  map[uint64]*dagmessage.SequencerHeader // [og/01] Pending header retrieval tasks, mapping starting indexes to skeleton headers
    53  	headerTaskQueue *prque.Prque                           // [og/01] Priority queue of the skeleton indexes to fetch the filling headers for
    54  	headerPeerMiss  map[string]map[uint64]struct{}         // [og/01] Set of per-peer header batches known to be unavailable
    55  	headerPendPool  map[string]*fetchRequest               // [og/01] Currently pending header retrieval operations
    56  	headerResults   []*dagmessage.SequencerHeader          // [og/01] Result cache accumulating the completed headers
    57  	headerProced    int                                    // [og/01] Number of headers already processed from the results
    58  	headerOffset    uint64                                 // [og/01] Number of the first header in the result cache
    59  	headerContCh    chan bool                              // [og/01] Channel to notify when header download finishes
    60  
    61  	// All data retrievals below are based on an already assembled header chain
    62  	blockTaskPool  map[types2.Hash]*dagmessage.SequencerHeader // [og/01] Pending block (body) retrieval tasks, mapping hashes to headers
    63  	blockTaskQueue *prque.Prque                                // [og/01] Priority queue of the headers to fetch the blocks (bodies) for
    64  	blockPendPool  map[string]*fetchRequest                    // [og/01] Currently pending block (body) retrieval operations
    65  	blockDonePool  map[types2.Hash]struct{}                    // [og/01] Set of the completed block (body) fetches
    66  
    67  	receiptTaskPool  map[types2.Hash]*dagmessage.SequencerHeader // [og/02] Pending receipt retrieval tasks, mapping hashes to headers
    68  	receiptTaskQueue *prque.Prque                                // [og/02] Priority queue of the headers to fetch the receipts for
    69  	receiptPendPool  map[string]*fetchRequest                    // [og/02] Currently pending receipt retrieval operations
    70  	receiptDonePool  map[types2.Hash]struct{}                    // [og/02] Set of the completed receipt fetches
    71  
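        	// resultCache is index-aligned with block numbers: slot i holds the result
        	// for the block at number resultOffset+i (see reserveHeaders and deliver).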
    72  	resultCache  []*fetchResult     // Downloaded but not yet delivered fetch results
    73  	resultOffset uint64             // Offset of the first cached fetch result in the block chain
    74  	resultSize   common.StorageSize // Approximate size of a block (exponential moving average)
    75  
    76  	lock   *sync.Mutex
    77  	active *sync.Cond
    78  	closed bool
    79  }
    80  
    81  // newQueue creates a new download queue for scheduling block retrieval.
    82  func newQueue() *queue {
    83  	lock := new(sync.Mutex)
    84  	return &queue{
    85  		headerPendPool:   make(map[string]*fetchRequest),
    86  		headerContCh:     make(chan bool),
    87  		blockTaskPool:    make(map[types2.Hash]*dagmessage.SequencerHeader),
    88  		blockTaskQueue:   prque.New(),
    89  		blockPendPool:    make(map[string]*fetchRequest),
    90  		blockDonePool:    make(map[types2.Hash]struct{}),
    91  		receiptTaskPool:  make(map[types2.Hash]*dagmessage.SequencerHeader),
    92  		receiptTaskQueue: prque.New(),
    93  		receiptPendPool:  make(map[string]*fetchRequest),
    94  		receiptDonePool:  make(map[types2.Hash]struct{}),
    95  		resultCache:      make([]*fetchResult, blockCacheItems),
    96  		active:           sync.NewCond(lock),
    97  		lock:             lock,
    98  	}
    99  }
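
        // Illustrative sketch (not part of the original source, placeholder names): a
        // typical pass through the queue API looks roughly like:
        //
        //	q := newQueue()
        //	q.Prepare(origin+1, FullSync)                 // first block number expected in results
        //	q.Schedule(headers, origin+1)                 // queue body (and receipt) fetch tasks
        //	req, _, _ := q.ReserveBodies(peer, count)     // hand a batch of headers to a peer
        //	n, err := q.DeliverBodies(peer.id, txs, seqs) // feed the peer's response back
        //	results := q.Results(true)                    // block until completed results are ready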
   100  
   101  // Reset clears out the queue contents.
   102  func (q *queue) Reset() {
   103  	q.lock.Lock()
   104  	defer q.lock.Unlock()
   105  
   106  	q.closed = false
   107  	q.mode = FullSync
   108  
   109  	q.headerHead = types2.Hash{}
   110  	q.headerPendPool = make(map[string]*fetchRequest)
   111  
   112  	q.blockTaskPool = make(map[types2.Hash]*dagmessage.SequencerHeader)
   113  	q.blockTaskQueue.Reset()
   114  	q.blockPendPool = make(map[string]*fetchRequest)
   115  	q.blockDonePool = make(map[types2.Hash]struct{})
   116  
   117  	q.receiptTaskPool = make(map[types2.Hash]*dagmessage.SequencerHeader)
   118  	q.receiptTaskQueue.Reset()
   119  	q.receiptPendPool = make(map[string]*fetchRequest)
   120  	q.receiptDonePool = make(map[types2.Hash]struct{})
   121  
   122  	q.resultCache = make([]*fetchResult, blockCacheItems)
   123  	q.resultOffset = 0
   124  }
   125  
   126  // Close marks the end of the sync, unblocking Results.
   127  // It may be called even if the queue is already closed.
   128  func (q *queue) Close() {
   129  	q.lock.Lock()
   130  	q.closed = true
   131  	q.lock.Unlock()
   132  	q.active.Broadcast()
   133  }
   134  
   135  // PendingHeaders retrieves the number of header requests pending for retrieval.
   136  func (q *queue) PendingHeaders() int {
   137  	q.lock.Lock()
   138  	defer q.lock.Unlock()
   139  
   140  	return q.headerTaskQueue.Size()
   141  }
   142  
   143  // PendingBlocks retrieves the number of block (body) requests pending for retrieval.
   144  func (q *queue) PendingBlocks() int {
   145  	q.lock.Lock()
   146  	defer q.lock.Unlock()
   147  
   148  	return q.blockTaskQueue.Size()
   149  }
   150  
   151  // PendingReceipts retrieves the number of block receipts pending for retrieval.
   152  func (q *queue) PendingReceipts() int {
   153  	q.lock.Lock()
   154  	defer q.lock.Unlock()
   155  
   156  	return q.receiptTaskQueue.Size()
   157  }
   158  
   159  // InFlightHeaders retrieves whether there are header fetch requests currently
   160  // in flight.
   161  func (q *queue) InFlightHeaders() bool {
   162  	q.lock.Lock()
   163  	defer q.lock.Unlock()
   164  
   165  	return len(q.headerPendPool) > 0
   166  }
   167  
   168  // InFlightBlocks retrieves whether there are block fetch requests currently in
   169  // flight.
   170  func (q *queue) InFlightBlocks() bool {
   171  	q.lock.Lock()
   172  	defer q.lock.Unlock()
   173  
   174  	return len(q.blockPendPool) > 0
   175  }
   176  
   177  // InFlightReceipts retrieves whether there are receipt fetch requests currently
   178  // in flight.
   179  func (q *queue) InFlightReceipts() bool {
   180  	q.lock.Lock()
   181  	defer q.lock.Unlock()
   182  
   183  	return len(q.receiptPendPool) > 0
   184  }
   185  
   186  // Idle returns if the queue is fully idle or has some data still inside.
   187  func (q *queue) Idle() bool {
   188  	q.lock.Lock()
   189  	defer q.lock.Unlock()
   190  
   191  	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
   192  	pending := len(q.blockPendPool) + len(q.receiptPendPool)
   193  	cached := len(q.blockDonePool) + len(q.receiptDonePool)
   194  
   195  	return (queued + pending + cached) == 0
   196  }
   197  
   198  // ShouldThrottleBlocks checks if the download should be throttled (active block (body)
   199  // fetches exceed block cache).
   200  func (q *queue) ShouldThrottleBlocks() bool {
   201  	q.lock.Lock()
   202  	defer q.lock.Unlock()
   203  
   204  	return q.resultSlots(q.blockPendPool, q.blockDonePool) <= 0
   205  }
   206  
   207  // ShouldThrottleReceipts checks if the download should be throttled (active receipt
   208  // fetches exceed block cache).
   209  func (q *queue) ShouldThrottleReceipts() bool {
   210  	q.lock.Lock()
   211  	defer q.lock.Unlock()
   212  
   213  	return q.resultSlots(q.receiptPendPool, q.receiptDonePool) <= 0
   214  }
   215  
   216  // resultSlots calculates the number of result slots available for requests
   217  // while adhering to both the item limit and the memory limit of the result
   218  // cache.
   219  func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[types2.Hash]struct{}) int {
   220  	// Calculate the maximum length capped by the memory limit
   221  	limit := len(q.resultCache)
   222  	if common.StorageSize(len(q.resultCache))*q.resultSize > common.StorageSize(blockCacheMemory) {
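        		// The "+ q.resultSize - 1" term makes this a ceiling division, i.e.
        		// limit = ceil(blockCacheMemory / resultSize).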
   223  		limit = int((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
   224  	}
   225  	// Calculate the number of slots already finished
   226  	finished := 0
   227  	for _, result := range q.resultCache[:limit] {
   228  		if result == nil {
   229  			break
   230  		}
   231  		if _, ok := donePool[result.Hash]; ok {
   232  			finished++
   233  		}
   234  	}
   235  	// Calculate the number of slots currently downloading
   236  	pending := 0
   237  	for _, request := range pendPool {
   238  		for _, header := range request.Headers {
   239  			if header.SequencerId() < q.resultOffset+uint64(limit) {
   240  				pending++
   241  			}
   242  		}
   243  	}
   244  	// Return the free slots to distribute
   245  	return limit - finished - pending
   246  }
   247  
   248  // ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
   249  // up an already retrieved header skeleton.
   250  func (q *queue) ScheduleSkeleton(from uint64, skeleton []*dagmessage.SequencerHeader) {
   251  	q.lock.Lock()
   252  	defer q.lock.Unlock()
   253  
   254  	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
   255  	if q.headerResults != nil {
   256  		panic("skeleton assembly already in progress")
   257  	}
   258  	// Schedule all the header retrieval tasks for the skeleton assembly
   259  	q.headerTaskPool = make(map[uint64]*dagmessage.SequencerHeader)
   260  	q.headerTaskQueue = prque.New()
   261  	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
   262  	q.headerResults = make([]*dagmessage.SequencerHeader, len(skeleton)*MaxHeaderFetch)
   263  	q.headerProced = 0
   264  	q.headerOffset = from
   265  	q.headerContCh = make(chan bool, 1)
   266  
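        	// Skeleton headers are spaced MaxHeaderFetch apart, so each task index is the
        	// number of the first header of a MaxHeaderFetch-sized batch; pushing with
        	// priority -index hands out the lowest (oldest) batches first.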
   267  	for i, header := range skeleton {
   268  		index := from + uint64(i*MaxHeaderFetch)
   269  
   270  		q.headerTaskPool[index] = header
   271  		q.headerTaskQueue.Push(index, -float32(index))
   272  	}
   273  }
   274  
   275  // RetrieveHeaders retrieves the header chain assembled from the scheduled
   276  // skeleton.
   277  func (q *queue) RetrieveHeaders() ([]*dagmessage.SequencerHeader, int) {
   278  	q.lock.Lock()
   279  	defer q.lock.Unlock()
   280  
   281  	headers, proced := q.headerResults, q.headerProced
   282  	q.headerResults, q.headerProced = nil, 0
   283  
   284  	return headers, proced
   285  }
   286  
   287  // Schedule adds a set of headers to the download queue for scheduling, returning
   288  // the new headers encountered.
   289  func (q *queue) Schedule(headers []*dagmessage.SequencerHeader, from uint64) []*dagmessage.SequencerHeader {
   290  	q.lock.Lock()
   291  	defer q.lock.Unlock()
   292  
   293  	// Insert all the headers prioritised by the contained block number
   294  	inserts := make([]*dagmessage.SequencerHeader, 0, len(headers))
   295  	for _, header := range headers {
   296  		// Make sure chain order is honoured and preserved throughout
   297  		hash := header.GetHash()
   298  		if header.SequencerId() == 0 || header.SequencerId() != from {
   299  			log.WithField("number", header.SequencerId()).WithField("hash", hash).WithField(
   300  				"expected", from).Warn("Header broke chain ordering")
   301  			break
   302  		}
   303  
   304  		// Make sure no duplicate requests are executed
   305  		if _, ok := q.blockTaskPool[hash]; ok {
   306  			log.WithField("number", header.SequencerId()).WithField(
   307  				"hash", hash).Warn("Header already scheduled for block fetch")
   308  			continue
   309  		}
   310  		if _, ok := q.receiptTaskPool[hash]; ok {
   311  			log.WithField("number", header.SequencerId()).WithField("hash", hash).Warn(
   312  				"Header already scheduled for receipt fetch")
   313  			continue
   314  		}
   315  		// Queue the header for content retrieval
   316  		q.blockTaskPool[hash] = header
   317  		q.blockTaskQueue.Push(header, -float32(header.SequencerId()))
   318  
   319  		if q.mode == FastSync {
   320  			q.receiptTaskPool[hash] = header
   321  			q.receiptTaskQueue.Push(header, -float32(header.SequencerId()))
   322  		}
   323  		inserts = append(inserts, header)
   324  		q.headerHead = hash
   325  		from++
   326  	}
   327  	return inserts
   328  }
   329  
   330  // Results retrieves and permanently removes a batch of fetch results from
   331  // the cache. The result slice will be empty if the queue has been closed.
   332  func (q *queue) Results(block bool) []*fetchResult {
   333  	q.lock.Lock()
   334  	defer q.lock.Unlock()
   335  
   336  	// Count the number of items available for processing
   337  	nproc := q.countProcessableItems()
   338  	for nproc == 0 && !q.closed {
   339  		if !block {
   340  			return nil
   341  		}
   342  		q.active.Wait()
   343  		nproc = q.countProcessableItems()
   344  	}
   345  	// Since we have a batch limit, don't pull more into "dangling" memory
   346  	if nproc > maxResultsProcess {
   347  		nproc = maxResultsProcess
   348  	}
   349  	results := make([]*fetchResult, nproc)
   350  	copy(results, q.resultCache[:nproc])
   351  	if len(results) > 0 {
   352  		// Mark results as done before dropping them from the cache.
   353  		for _, result := range results {
   354  			hash := result.Header.GetHash()
   355  			delete(q.blockDonePool, hash)
   356  			delete(q.receiptDonePool, hash)
   357  		}
   358  		// Delete the results from the cache and clear the tail.
   359  		copy(q.resultCache, q.resultCache[nproc:])
   360  		for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
   361  			q.resultCache[i] = nil
   362  		}
   363  		// Advance the expected block number of the first cache entry.
   364  		q.resultOffset += uint64(nproc)
   365  
   366  		// Recalculate the result item weights to prevent memory exhaustion
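        		// Each result's serialized transaction size feeds an exponential moving
        		// average with weight blockCacheSizeWeight (0.1):
        		// resultSize = 0.1*size + 0.9*resultSize.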
   367  		for _, result := range results {
   368  			sizeInt := 0
   369  			for _, tx := range result.Transactions {
   370  				sizeInt += tx.Msgsize()
   371  			}
   372  			size := common.StorageSize(float64(sizeInt))
   373  			q.resultSize = common.StorageSize(blockCacheSizeWeight)*size + (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
   374  		}
   375  	}
   376  	return results
   377  }
   378  
   379  // countProcessableItems counts the contiguous run of completed results at the head of the result cache.
   380  func (q *queue) countProcessableItems() int {
   381  	for i, result := range q.resultCache {
   382  		if result == nil || result.Pending > 0 {
   383  			return i
   384  		}
   385  	}
   386  	return len(q.resultCache)
   387  }
   388  
   389  // ReserveHeaders reserves a set of headers for the given peer, skipping any
   390  // previously failed batches.
   391  func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
   392  	q.lock.Lock()
   393  	defer q.lock.Unlock()
   394  
   395  	// Short circuit if the peer's already downloading something (sanity check to
   396  	// not corrupt state)
   397  	if _, ok := q.headerPendPool[p.id]; ok {
   398  		return nil
   399  	}
   400  	// Retrieve a batch of hashes, skipping previously failed ones
   401  	send, skip := uint64(0), []uint64{}
   402  	for send == 0 && !q.headerTaskQueue.Empty() {
   403  		from, _ := q.headerTaskQueue.Pop()
   404  		if q.headerPeerMiss[p.id] != nil {
   405  			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
   406  				skip = append(skip, from.(uint64))
   407  				continue
   408  			}
   409  		}
   410  		send = from.(uint64)
   411  	}
   412  	// Merge all the skipped batches back
   413  	for _, from := range skip {
   414  		q.headerTaskQueue.Push(from, -float32(from))
   415  	}
   416  	// Assemble and return the block download request
   417  	if send == 0 {
   418  		return nil
   419  	}
   420  	request := &fetchRequest{
   421  		Peer: p,
   422  		From: send,
   423  		Time: time.Now(),
   424  	}
   425  	q.headerPendPool[p.id] = request
   426  	return request
   427  }
   428  
   429  // ReserveBodies reserves a set of body fetches for the given peer, skipping any
   430  // previously failed downloads. Besides the next batch of needed fetches, it also
   431  // returns a flag indicating whether any empty blocks were queued that require processing.
   432  func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
   433  	isNoop := func(header *dagmessage.SequencerHeader) bool {
   434  		hash := header.GetHash()
   435  		return hash.Empty()
   436  	}
   437  	q.lock.Lock()
   438  	defer q.lock.Unlock()
   439  
   440  	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
   441  }
   442  
   443  // reserveHeaders reserves a set of data download operations for a given peer,
   444  // skipping any previously failed ones. This method is a generic version used
   445  // by the individual special reservation functions.
   446  //
   447  // Note, this method expects the queue lock to be already held for writing. The
   448  // reason the lock is not obtained in here is because the parameters already need
   449  // to access the queue, so they already need a lock anyway.
   450  func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[types2.Hash]*dagmessage.SequencerHeader, taskQueue *prque.Prque,
   451  	pendPool map[string]*fetchRequest, donePool map[types2.Hash]struct{}, isNoop func(*dagmessage.SequencerHeader) bool) (*fetchRequest, bool, error) {
   452  	// Short circuit if the pool has been depleted, or if the peer's already
   453  	// downloading something (sanity check not to corrupt state)
   454  	if taskQueue.Empty() {
   455  		return nil, false, nil
   456  	}
   457  	if _, ok := pendPool[p.id]; ok {
   458  		return nil, false, nil
   459  	}
   460  	// Calculate an upper limit on the items we might fetch (i.e. throttling)
   461  	space := q.resultSlots(pendPool, donePool)
   462  
   463  	// Retrieve a batch of tasks, skipping previously failed ones
   464  	send := make([]*dagmessage.SequencerHeader, 0, count)
   465  	skip := make([]*dagmessage.SequencerHeader, 0)
   466  
   467  	progress := false
   468  	for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
   469  		header := taskQueue.PopItem().(*dagmessage.SequencerHeader)
   470  		hash := header.GetHash()
   471  
   472  		// If we're the first to request this task, initialise the result container
   473  		index := int(header.SequencerId() - q.resultOffset)
   474  		if index >= len(q.resultCache) || index < 0 {
   475  			log.Debug("index allocation went beyond available resultCache space")
   476  			return nil, false, errInvalidChain
   477  		}
   478  		if q.resultCache[index] == nil {
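        			// A result completes once all its components are delivered: just the
        			// body in full sync, body plus receipts (two components) in fast sync.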
   479  			components := 1
   480  			if q.mode == FastSync {
   481  				components = 2
   482  			}
   483  			q.resultCache[index] = &fetchResult{
   484  				Pending: components,
   485  				Hash:    hash,
   486  				Header:  header,
   487  			}
   488  		}
   489  		// If this fetch task is a noop, skip this fetch operation
   490  		if isNoop(header) {
   491  			donePool[hash] = struct{}{}
   492  			delete(taskPool, hash)
   493  
   494  			space, proc = space-1, proc-1
   495  			q.resultCache[index].Pending--
   496  			progress = true
   497  			continue
   498  		}
   499  		// Otherwise unless the peer is known not to have the data, add to the retrieve list
   500  		if p.Lacks(hash) {
   501  			skip = append(skip, header)
   502  		} else {
   503  			send = append(send, header)
   504  		}
   505  	}
   506  	// Merge all the skipped headers back
   507  	for _, header := range skip {
   508  		taskQueue.Push(header, -float32(header.SequencerId()))
   509  	}
   510  	if progress {
   511  		// Wake Results, resultCache was modified
   512  		q.active.Signal()
   513  	}
   514  	// Assemble and return the block download request
   515  	if len(send) == 0 {
   516  		return nil, progress, nil
   517  	}
   518  	request := &fetchRequest{
   519  		Peer:    p,
   520  		Headers: send,
   521  		Time:    time.Now(),
   522  	}
   523  	pendPool[p.id] = request
   524  
   525  	return request, progress, nil
   526  }
   527  
   528  // CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
   529  func (q *queue) CancelHeaders(request *fetchRequest) {
   530  	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
   531  }
   532  
   533  // CancelBodies aborts a body fetch request, returning all pending headers to the
   534  // task queue.
   535  func (q *queue) CancelBodies(request *fetchRequest) {
   536  	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
   537  }
   538  
   539  // CancelReceipts aborts a receipt fetch request, returning all pending headers to
   540  // the task queue.
   541  func (q *queue) CancelReceipts(request *fetchRequest) {
   542  	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
   543  }
   544  
   545  // Cancel aborts a fetch request, returning all pending hashes to the task queue.
   546  func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
   547  	q.lock.Lock()
   548  	defer q.lock.Unlock()
   549  
   550  	if request.From > 0 {
   551  		taskQueue.Push(request.From, -float32(request.From))
   552  	}
   553  	for _, header := range request.Headers {
   554  		taskQueue.Push(header, -float32(header.SequencerId()))
   555  	}
   556  	delete(pendPool, request.Peer.id)
   557  }
   558  
   559  // Revoke cancels all pending requests belonging to a given peer. This method is
   560  // meant to be called during a peer drop to quickly reassign owned data fetches
   561  // to remaining nodes.
   562  func (q *queue) Revoke(peerID string) {
   563  	q.lock.Lock()
   564  	defer q.lock.Unlock()
   565  
   566  	if request, ok := q.blockPendPool[peerID]; ok {
   567  		for _, header := range request.Headers {
   568  			q.blockTaskQueue.Push(header, -float32(header.SequencerId()))
   569  		}
   570  		delete(q.blockPendPool, peerID)
   571  	}
   572  	if request, ok := q.receiptPendPool[peerID]; ok {
   573  		for _, header := range request.Headers {
   574  			q.receiptTaskQueue.Push(header, -float32(header.SequencerId()))
   575  		}
   576  		delete(q.receiptPendPool, peerID)
   577  	}
   578  }
   579  
   580  // ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
   581  // canceling them and returning the responsible peers for penalisation.
   582  func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
   583  	q.lock.Lock()
   584  	defer q.lock.Unlock()
   585  
   586  	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
   587  }
   588  
   589  // ExpireBodies checks for in flight block body requests that exceeded a timeout
   590  // allowance, canceling them and returning the responsible peers for penalisation.
   591  func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
   592  	q.lock.Lock()
   593  	defer q.lock.Unlock()
   594  
   595  	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
   596  }
   597  
   598  // expire is the generic check that moves expired tasks from a pending pool back
   599  // into a task pool, returning all entities caught with expired tasks.
   600  //
   601  // Note, this method expects the queue lock to be already held. The
   602  // reason the lock is not obtained in here is because the parameters already need
   603  // to access the queue, so they already need a lock anyway.
   604  func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
   605  	// Iterate over the expired requests and return each to the queue
   606  	expiries := make(map[string]int)
   607  	for id, request := range pendPool {
   608  		if time.Since(request.Time) > timeout {
   609  			// Update the metrics with the timeout
   610  			timeoutMeter.Mark(1)
   611  
   612  			// Return any non satisfied requests to the pool
   613  			if request.From > 0 {
   614  				taskQueue.Push(request.From, -float32(request.From))
   615  			}
   616  			for _, header := range request.Headers {
   617  				taskQueue.Push(header, -float32(header.SequencerId()))
   618  			}
   619  			// Add the peer to the expiry report along with the number of failed requests
   620  			expiries[id] = len(request.Headers)
   621  		}
   622  	}
   623  	// Remove the expired requests from the pending pool
   624  	for id := range expiries {
   625  		delete(pendPool, id)
   626  	}
   627  	if len(expiries) != 0 {
   628  		log.WithField("ids", expiries).Debug("expire")
   629  	}
   630  
   631  	return expiries
   632  }
   633  
   634  // DeliverHeaders injects a header retrieval response into the header results
   635  // cache. This method either accepts all headers it received, or none of them
   636  // if they do not map correctly to the skeleton.
   637  //
   638  // If the headers are accepted, the method makes an attempt to deliver the set
   639  // of ready headers to the processor to keep the pipeline full. However it will
   640  // not block to prevent stalling other pending deliveries.
   641  func (q *queue) DeliverHeaders(id string, headers []*dagmessage.SequencerHeader, headerProcCh chan []*dagmessage.SequencerHeader) (int, error) {
   642  	q.lock.Lock()
   643  	defer q.lock.Unlock()
   644  
   645  	// Short circuit if the data was never requested
   646  	request := q.headerPendPool[id]
   647  	if request == nil {
   648  		log.WithError(errNoFetchesPending).Warn("headers")
   649  		return 0, errNoFetchesPending
   650  	}
   651  	headerReqTimer.UpdateSince(request.Time)
   652  	delete(q.headerPendPool, id)
   653  
   654  	// Ensure headers can be mapped onto the skeleton chain
   655  	target := q.headerTaskPool[request.From].GetHash()
   656  	clog := log.WithField("peer", id).WithField("from", request.From)
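        	// A batch is accepted only if it holds exactly MaxHeaderFetch headers that
        	// start at the requested index, end on the skeleton target hash and are
        	// numbered contiguously in between; otherwise the whole delivery is rejected.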
   657  	accepted := len(headers) == MaxHeaderFetch
   658  	if accepted {
   659  		if headers[0].SequencerId() != request.From {
   660  			clog.WithField("number", headers[0].SequencerId()).WithField(
   661  				"hash", headers[0].GetHash()).Trace("First header broke chain ordering")
   662  			accepted = false
   663  		} else if headers[len(headers)-1].GetHash() != target {
   664  			clog.WithField("number", headers[len(headers)-1].SequencerId()).WithField(
   665  				"hash", headers[len(headers)-1].GetHash()).WithField("expected", target).Trace(
   666  				"Last header broke skeleton structure")
   667  			accepted = false
   668  		}
   669  	}
   670  	if accepted {
   671  		for i, header := range headers[1:] {
   672  			hash := header.GetHash()
   673  			if want := request.From + 1 + uint64(i); header.SequencerId() != want {
   674  				clog.WithField("number", header.SequencerId()).WithField(
   675  					"hash", hash).WithField("expected", want).Warn(
   676  					"Header broke chain ordering")
   677  				accepted = false
   678  				break
   679  			}
   680  		}
   681  	}
   682  	// If the batch of headers wasn't accepted, mark as unavailable
   683  	if !accepted {
   684  		clog.Trace("Skeleton filling not accepted")
   685  
   686  		miss := q.headerPeerMiss[id]
   687  		if miss == nil {
   688  			q.headerPeerMiss[id] = make(map[uint64]struct{})
   689  			miss = q.headerPeerMiss[id]
   690  		}
   691  		miss[request.From] = struct{}{}
   692  
   693  		q.headerTaskQueue.Push(request.From, -float32(request.From))
   694  		//return 0, errors.New("delivery not accepted")
   695  		return 0, errNotAccepet
   696  	}
   697  	// Clean up a successful fetch and try to deliver any sub-results
   698  	copy(q.headerResults[request.From-q.headerOffset:], headers)
   699  	delete(q.headerTaskPool, request.From)
   700  
   701  	ready := 0
   702  	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
   703  		ready += MaxHeaderFetch
   704  	}
   705  	if ready > 0 {
   706  		// Headers are ready for delivery, gather them and push forward (non blocking)
   707  		process := make([]*dagmessage.SequencerHeader, ready)
   708  		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])
   709  
   710  		select {
   711  		case headerProcCh <- process:
   712  			log.WithField("peer", id).WithField("count", len(process)).WithField(
   713  				"from", process[0].SequencerId()).Trace("Pre-scheduled new headers")
   714  			q.headerProced += len(process)
   715  		default:
   716  		}
   717  	}
   718  	// Check for termination and return
   719  	if len(q.headerTaskPool) == 0 {
   720  		q.headerContCh <- false
   721  	}
   722  	return len(headers), nil
   723  }
   724  
   725  // DeliverBodies injects a block body retrieval response into the results queue.
   726  // The method returns the number of block bodies accepted from the delivery and
   727  // also wakes any threads waiting for data delivery.
   728  func (q *queue) DeliverBodies(id string, txLists []types.Txis, sequencers []*types.Sequencer) (int, error) {
   729  	q.lock.Lock()
   730  	defer q.lock.Unlock()
   731  
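        	// txLists and sequencers are index-aligned with the requested headers: the
        	// i-th transaction list and i-th sequencer reconstruct the i-th block body.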
   732  	reconstruct := func(header *dagmessage.SequencerHeader, index int, result *fetchResult) error {
   733  		seqHeader := sequencers[index].GetHead()
   734  		if !header.Equal(seqHeader) {
   735  			log.WithField("requested header", header.StringFull()).WithField("response seq", seqHeader.StringFull()).Warn(
   736  				"request header and response seq mismatch")
   737  			return errInvalidBody
   738  		}
   739  		result.Transactions = txLists[index]
   740  		result.Sequencer = sequencers[index]
   741  		return nil
   742  	}
   743  	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
   744  }
   745  
   746  // deliver injects a data retrieval response into the results queue.
   747  //
   748  // Note, this method expects the queue lock to be already held for writing. The
   749  // reason the lock is not obtained in here is because the parameters already need
   750  // to access the queue, so they already need a lock anyway.
   751  func (q *queue) deliver(id string, taskPool map[types2.Hash]*dagmessage.SequencerHeader, taskQueue *prque.Prque,
   752  	pendPool map[string]*fetchRequest, donePool map[types2.Hash]struct{}, reqTimer metrics.Timer,
   753  	results int, reconstruct func(header *dagmessage.SequencerHeader, index int, result *fetchResult) error) (int, error) {
   754  
   755  	// Short circuit if the data was never requested
   756  	request := pendPool[id]
   757  	if request == nil {
   758  		log.WithField("request id", id).WithError(errNoFetchesPending).Warn("deliver")
   759  		return 0, errNoFetchesPending
   760  	}
   761  	reqTimer.UpdateSince(request.Time)
   762  	delete(pendPool, id)
   763  
   764  	// If no data items were retrieved, mark them as unavailable for the origin peer
   765  	if results == 0 {
   766  		for _, header := range request.Headers {
   767  			request.Peer.MarkLacking(header.GetHash())
   768  		}
   769  	}
   770  	// Assemble each of the results with their headers and retrieved data parts
   771  	var (
   772  		accepted int
   773  		failure  error
   774  		useful   bool
   775  	)
   776  	for i, header := range request.Headers {
   777  		// Short circuit assembly if no more fetch results are found
   778  		if i >= results {
   779  			break
   780  		}
   781  		// Reconstruct the next result if contents match up
   782  		index := int(header.SequencerId() - q.resultOffset)
   783  		if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
   784  			failure = errInvalidChain
   785  			break
   786  		}
   787  		if err := reconstruct(header, i, q.resultCache[index]); err != nil {
   788  			failure = err
   789  			break
   790  		}
   791  		hash := header.GetHash()
   792  
   793  		donePool[hash] = struct{}{}
   794  		q.resultCache[index].Pending--
   795  		useful = true
   796  		accepted++
   797  
   798  		// Clean up a successful fetch
   799  		request.Headers[i] = nil
   800  		delete(taskPool, hash)
   801  	}
   802  	// Return all failed or missing fetches to the queue
   803  	for _, header := range request.Headers {
   804  		if header != nil {
   805  			taskQueue.Push(header, -float32(header.SequencerId()))
   806  		}
   807  	}
   808  	// Wake up Results
   809  	if accepted > 0 {
   810  		q.active.Signal()
   811  	}
   812  	// If none of the data was good, it's a stale delivery
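        	// A nil failure or errInvalidChain is returned as-is, a partially useful
        	// delivery wraps the failure, and a delivery with nothing useful counts as
        	// a stale delivery.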
   813  	switch {
   814  	case failure == nil || failure == errInvalidChain:
   815  		return accepted, failure
   816  	case useful:
   817  		return accepted, fmt.Errorf("partial failure: %v", failure)
   818  	default:
   819  		return accepted, errStaleDelivery
   820  	}
   821  }
   822  
   823  // Prepare configures the result cache to allow accepting and caching inbound
   824  // fetch results.
   825  func (q *queue) Prepare(offset uint64, mode SyncMode) {
   826  	q.lock.Lock()
   827  	defer q.lock.Unlock()
   828  
   829  	// Prepare the queue for sync results
   830  	if q.resultOffset < offset {
   831  		q.resultOffset = offset
   832  	}
   833  	q.mode = mode
   834  }