github.com/klaytn/klaytn@v1.10.2/datasync/downloader/resultstore.go (about)

// Modifications Copyright 2020 The klaytn Authors
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from eth/downloader/resultstore.go (2020/07/24).
// Modified and improved for the klaytn development.

package downloader

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/klaytn/klaytn/blockchain/types"
)

// resultStore implements a structure for maintaining fetchResults, tracking their
// download progress and delivering (finished) results.
type resultStore struct {
	items        []*fetchResult // Downloaded but not yet delivered fetch results
	resultOffset uint64         // Offset of the first cached fetch result in the block chain

	// Internal index of the first non-completed entry, updated atomically when needed.
	// If all items are complete, this equals len(items), so
	// *important*: it is not safe to use for indexing without checking against the length.
	indexIncomplete int32 // atomic access

	// throttleThreshold is the limit up to which we _want_ to fill the
	// results. If blocks are large, we want to limit the results to fewer
	// than the number of available slots, and maybe only fill 1024 out of
	// 8192 possible places. The queue will, at certain times, recalibrate
	// this index.
	throttleThreshold uint64

	lock sync.RWMutex
}

func newResultStore(size int) *resultStore {
	return &resultStore{
		resultOffset:      0,
		items:             make([]*fetchResult, size),
		throttleThreshold: uint64(size),
	}
}
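
// exampleResultStoreSetup is an illustrative sketch, not part of the original file:
// it shows how a caller might wire up a resultStore before a sync. The capacity,
// starting block number and threshold below are made-up values for illustration only.
func exampleResultStoreSetup() *resultStore {
	store := newResultStore(8192)    // room for 8192 in-flight fetch results
	store.Prepare(1000000)           // first block number expected from this sync
	store.SetThrottleThreshold(1024) // start by filling only 1024 of the 8192 slots
	return store
}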

// SetThrottleThreshold updates the throttling threshold based on the requested
// limit and the total queue capacity. It returns the (possibly capped) threshold.
func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
	r.lock.Lock()
	defer r.lock.Unlock()

	limit := uint64(len(r.items))
	if threshold >= limit {
		threshold = limit
	}
	r.throttleThreshold = threshold
	return r.throttleThreshold
}
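
// exampleRecalibrateThreshold is an illustrative sketch, not part of the original
// file: it shows the kind of recalibration the throttleThreshold comment alludes to.
// The memory budget and the use of an average block size are assumptions made up
// for this example; the real recalibration logic lives in the queue, outside this file.
func exampleRecalibrateThreshold(r *resultStore, avgBlockSizeBytes uint64) uint64 {
	const targetBudgetBytes = 64 * 1024 * 1024 // hypothetical memory budget for pending results
	if avgBlockSizeBytes == 0 {
		avgBlockSizeBytes = 1
	}
	// Ask for as many slots as fit in the budget; SetThrottleThreshold caps the
	// request at the store's capacity.
	return r.SetThrottleThreshold(targetBudgetBytes / avgBlockSizeBytes)
}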

// AddFetch adds a header for body/receipt fetching. This is used when the queue
// wants to reserve headers for fetching.
//
// It returns the following:
//   stale     - if true, this item has already been passed and should not be requested again
//   throttled - if true, the store is at capacity and this particular header is not a priority right now
//   item      - the result to store data into
//   err       - any error that occurred
func (r *resultStore) AddFetch(header *types.Header, mode SyncMode, proposerPolicy uint64) (stale, throttled bool, item *fetchResult, err error) {
	r.lock.Lock()
	defer r.lock.Unlock()

	var index int
	item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())
	if err != nil || stale || throttled {
		return stale, throttled, item, err
	}
	if item == nil {
		item = newFetchResult(header, mode, proposerPolicy)
		r.items[index] = item
	}
	return stale, throttled, item, err
}
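
// exampleReserveHeaders is an illustrative sketch, not part of the original file:
// it shows how reservation code might drive AddFetch over a batch of headers and
// react to the stale/throttled signals documented above. The mode and proposerPolicy
// values are simply passed through; error handling is reduced to stopping the loop.
func exampleReserveHeaders(r *resultStore, headers []*types.Header, mode SyncMode, proposerPolicy uint64) []*types.Header {
	var reserved []*types.Header
	for _, header := range headers {
		stale, throttled, _, err := r.AddFetch(header, mode, proposerPolicy)
		if err != nil {
			break // e.g. the header lies beyond the store's capacity window
		}
		if stale {
			continue // already delivered upstream, nothing left to fetch
		}
		if throttled {
			break // store is full enough for now, retry this header later
		}
		reserved = append(reserved, header)
	}
	return reserved
}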

// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
// is true, that means the header has already been delivered 'upstream'. This method
// does not bubble up the 'throttle' flag, since it's moot at the point in time when
// the item is downloaded and ready for delivery.
func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {
	r.lock.RLock()
	defer r.lock.RUnlock()

	res, _, stale, _, err := r.getFetchResult(headerNumber)
	return res, stale, err
}
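
// exampleLookupDeliverySlot is an illustrative sketch, not part of the original
// file: it shows how delivery code might resolve the slot for a downloaded block
// body before filling it in. How the slot is actually populated depends on
// fetchResult's setters, which live outside this file and are not assumed here.
func exampleLookupDeliverySlot(r *resultStore, blockNumber uint64) (*fetchResult, bool) {
	item, stale, err := r.GetDeliverySlot(blockNumber)
	if err != nil || stale || item == nil {
		// Either the block is outside the store's window, already delivered
		// upstream, or was never reserved; the delivery is dropped.
		return nil, false
	}
	return item, true
}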

// getFetchResult returns the fetchResult corresponding to the given header number,
// and the index where the result is stored.
func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {
	index = int(int64(headerNumber) - int64(r.resultOffset))
	throttle = index >= int(r.throttleThreshold)
	stale = index < 0

	if index >= len(r.items) {
		err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+
			"(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d)", errInvalidChain,
			index, headerNumber, r.resultOffset, len(r.items))
		return nil, index, stale, throttle, err
	}
	if stale {
		return nil, index, stale, throttle, nil
	}
	item = r.items[index]
	return item, index, stale, throttle, nil
}
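
// Worked example of the index arithmetic in getFetchResult (illustrative numbers only):
// with resultOffset = 1000, len(items) = 8192 and throttleThreshold = 1024,
//   - headerNumber 990  -> index -10  -> stale (already delivered upstream)
//   - headerNumber 1500 -> index 500  -> a normal, reservable slot
//   - headerNumber 2500 -> index 1500 -> throttled (beyond the current threshold)
//   - headerNumber 9500 -> index 8500 -> errInvalidChain (beyond the store's capacity)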

// HasCompletedItems returns true if there are processable items available.
// This method is cheaper than countCompleted.
func (r *resultStore) HasCompletedItems() bool {
	r.lock.RLock()
	defer r.lock.RUnlock()

	if len(r.items) == 0 {
		return false
	}
	if item := r.items[0]; item != nil && item.AllDone() {
		return true
	}
	return false
}

// countCompleted returns the number of items ready for delivery, stopping at
// the first non-complete item.
//
// The method assumes (at least) the read lock is held.
func (r *resultStore) countCompleted() int {
	// We iterate from the already known complete point, and see
	// if any more have completed since the last count.
	index := atomic.LoadInt32(&r.indexIncomplete)
	for ; ; index++ {
		if index >= int32(len(r.items)) {
			break
		}
		result := r.items[index]
		if result == nil || !result.AllDone() {
			break
		}
	}
	atomic.StoreInt32(&r.indexIncomplete, index)
	return int(index)
}

// GetCompleted returns the next batch of completed fetchResults.
func (r *resultStore) GetCompleted(limit int) []*fetchResult {
	r.lock.Lock()
	defer r.lock.Unlock()

	completed := r.countCompleted()
	if limit > completed {
		limit = completed
	}
	results := make([]*fetchResult, limit)
	copy(results, r.items[:limit])

	// Delete the results from the cache and clear the tail.
	copy(r.items, r.items[limit:])
	for i := len(r.items) - limit; i < len(r.items); i++ {
		r.items[i] = nil
	}
	// Advance the expected block number of the first cache entry.
	r.resultOffset += uint64(limit)
	atomic.AddInt32(&r.indexIncomplete, int32(-limit))

	return results
}
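
// exampleImportCompleted is an illustrative sketch, not part of the original file:
// it shows the consumer side of the store, draining completed results in batches.
// The batch size and the deliver callback are assumptions made up for this example;
// in the real downloader this loop lives in the queue/importer, not in this file.
func exampleImportCompleted(r *resultStore, deliver func([]*fetchResult)) {
	const batchSize = 256 // hypothetical number of results imported per round
	for r.HasCompletedItems() {
		results := r.GetCompleted(batchSize)
		if len(results) == 0 {
			return
		}
		deliver(results)
	}
}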

// Prepare initialises the offset with the given block number.
func (r *resultStore) Prepare(offset uint64) {
	r.lock.Lock()
	defer r.lock.Unlock()

	if r.resultOffset < offset {
		r.resultOffset = offset
	}
}