github.com/carter-ya/go-ethereum@v0.0.0-20230628080049-d2309be3983b/eth/downloader/resultstore.go (about)

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package downloader
    18  
    19  import (
    20  	"fmt"
    21  	"sync"
    22  	"sync/atomic"
    23  
    24  	"github.com/ethereum/go-ethereum/core/types"
    25  )
    26  
// resultStore implements a structure for maintaining fetchResults, tracking their
// download-progress and delivering (finished) results.
type resultStore struct {
	items        []*fetchResult // Downloaded but not yet delivered fetch results
	resultOffset uint64         // Offset of the first cached fetch result in the block chain

	// Internal index of first non-completed entry, updated atomically when needed.
	// If all items are complete, this will equal length(items), so
	// *important* : is not safe to use for indexing without checking against length
	indexIncomplete int32 // atomic access

	// throttleThreshold is the limit up to which we _want_ to fill the
	// results. If blocks are large, we want to limit the results to less
	// than the number of available slots, and maybe only fill 1024 out of
	// 8192 possible places. The queue will, at certain times, recalibrate
	// this index.
	throttleThreshold uint64

	// lock protects items, resultOffset and throttleThreshold. indexIncomplete
	// is additionally accessed atomically so countCompleted can run under RLock.
	lock sync.RWMutex
}
    47  
    48  func newResultStore(size int) *resultStore {
    49  	return &resultStore{
    50  		resultOffset:      0,
    51  		items:             make([]*fetchResult, size),
    52  		throttleThreshold: uint64(size),
    53  	}
    54  }
    55  
    56  // SetThrottleThreshold updates the throttling threshold based on the requested
    57  // limit and the total queue capacity. It returns the (possibly capped) threshold
    58  func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
    59  	r.lock.Lock()
    60  	defer r.lock.Unlock()
    61  
    62  	limit := uint64(len(r.items))
    63  	if threshold >= limit {
    64  		threshold = limit
    65  	}
    66  	r.throttleThreshold = threshold
    67  	return r.throttleThreshold
    68  }
    69  
    70  // AddFetch adds a header for body/receipt fetching. This is used when the queue
    71  // wants to reserve headers for fetching.
    72  //
    73  // It returns the following:
    74  //
    75  //	stale     - if true, this item is already passed, and should not be requested again
    76  //	throttled - if true, the store is at capacity, this particular header is not prio now
    77  //	item      - the result to store data into
    78  //	err       - any error that occurred
    79  func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
    80  	r.lock.Lock()
    81  	defer r.lock.Unlock()
    82  
    83  	var index int
    84  	item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())
    85  	if err != nil || stale || throttled {
    86  		return stale, throttled, item, err
    87  	}
    88  	if item == nil {
    89  		item = newFetchResult(header, fastSync)
    90  		r.items[index] = item
    91  	}
    92  	return stale, throttled, item, err
    93  }
    94  
    95  // GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
    96  // is true, that means the header has already been delivered 'upstream'. This method
    97  // does not bubble up the 'throttle' flag, since it's moot at the point in time when
    98  // the item is downloaded and ready for delivery
    99  func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {
   100  	r.lock.RLock()
   101  	defer r.lock.RUnlock()
   102  
   103  	res, _, stale, _, err := r.getFetchResult(headerNumber)
   104  	return res, stale, err
   105  }
   106  
   107  // getFetchResult returns the fetchResult corresponding to the given item, and
   108  // the index where the result is stored.
   109  func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {
   110  	index = int(int64(headerNumber) - int64(r.resultOffset))
   111  	throttle = index >= int(r.throttleThreshold)
   112  	stale = index < 0
   113  
   114  	if index >= len(r.items) {
   115  		err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+
   116  			"(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d", errInvalidChain,
   117  			index, headerNumber, r.resultOffset, len(r.items))
   118  		return nil, index, stale, throttle, err
   119  	}
   120  	if stale {
   121  		return nil, index, stale, throttle, nil
   122  	}
   123  	item = r.items[index]
   124  	return item, index, stale, throttle, nil
   125  }
   126  
   127  // hasCompletedItems returns true if there are processable items available
   128  // this method is cheaper than countCompleted
   129  func (r *resultStore) HasCompletedItems() bool {
   130  	r.lock.RLock()
   131  	defer r.lock.RUnlock()
   132  
   133  	if len(r.items) == 0 {
   134  		return false
   135  	}
   136  	if item := r.items[0]; item != nil && item.AllDone() {
   137  		return true
   138  	}
   139  	return false
   140  }
   141  
   142  // countCompleted returns the number of items ready for delivery, stopping at
   143  // the first non-complete item.
   144  //
   145  // The mthod assumes (at least) rlock is held.
   146  func (r *resultStore) countCompleted() int {
   147  	// We iterate from the already known complete point, and see
   148  	// if any more has completed since last count
   149  	index := atomic.LoadInt32(&r.indexIncomplete)
   150  	for ; ; index++ {
   151  		if index >= int32(len(r.items)) {
   152  			break
   153  		}
   154  		result := r.items[index]
   155  		if result == nil || !result.AllDone() {
   156  			break
   157  		}
   158  	}
   159  	atomic.StoreInt32(&r.indexIncomplete, index)
   160  	return int(index)
   161  }
   162  
   163  // GetCompleted returns the next batch of completed fetchResults
   164  func (r *resultStore) GetCompleted(limit int) []*fetchResult {
   165  	r.lock.Lock()
   166  	defer r.lock.Unlock()
   167  
   168  	completed := r.countCompleted()
   169  	if limit > completed {
   170  		limit = completed
   171  	}
   172  	results := make([]*fetchResult, limit)
   173  	copy(results, r.items[:limit])
   174  
   175  	// Delete the results from the cache and clear the tail.
   176  	copy(r.items, r.items[limit:])
   177  	for i := len(r.items) - limit; i < len(r.items); i++ {
   178  		r.items[i] = nil
   179  	}
   180  	// Advance the expected block number of the first cache entry
   181  	r.resultOffset += uint64(limit)
   182  	atomic.AddInt32(&r.indexIncomplete, int32(-limit))
   183  
   184  	return results
   185  }
   186  
   187  // Prepare initialises the offset with the given block number
   188  func (r *resultStore) Prepare(offset uint64) {
   189  	r.lock.Lock()
   190  	defer r.lock.Unlock()
   191  
   192  	if r.resultOffset < offset {
   193  		r.resultOffset = offset
   194  	}
   195  }