github.com/nspcc-dev/neo-go@v0.105.2-0.20240517133400-6be757af3eba/pkg/core/headerhashes.go

package core

import (
	"fmt"
	"sync"

	lru "github.com/hashicorp/golang-lru/v2"
	"github.com/nspcc-dev/neo-go/pkg/core/block"
	"github.com/nspcc-dev/neo-go/pkg/core/dao"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

const (
	// headerBatchCount is the number of header hashes stored per page.
	headerBatchCount = 2000
	// pagesCache is the number of header hash pages kept in the LRU cache.
	pagesCache = 8
)

// HeaderHashes is a header hash manager, a part of the Blockchain. It can't be
// used without the Blockchain.
type HeaderHashes struct {
	// Backing storage.
	dao *dao.Simple

	// Lock for all internal state fields.
	lock sync.RWMutex

	// The latest header hashes (heights storedHeaderCount and above).
	latest []util.Uint256

	// Previously completed page of header hashes (the page right before storedHeaderCount).
	previous []util.Uint256

	// Number of header hashes stored in the DB in complete pages.
	storedHeaderCount uint32

	// Cache for accessed pages of header hashes.
	cache *lru.Cache[uint32, []util.Uint256]
}

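// initGenesis initializes HeaderHashes with the given genesis block hash and
// marks it as the current header.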
func (h *HeaderHashes) initGenesis(dao *dao.Simple, hash util.Uint256) {
	h.dao = dao
	h.cache, _ = lru.New[uint32, []util.Uint256](pagesCache) // Never errors for positive size.
	h.previous = make([]util.Uint256, headerBatchCount)
	h.latest = make([]util.Uint256, 0, headerBatchCount)
	h.latest = append(h.latest, hash)
	dao.PutCurrentHeader(hash, 0)
}

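// init restores HeaderHashes state from the given DAO, reloading the last
// complete page of hashes and reconstructing hashes of any headers stored
// after it.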
func (h *HeaderHashes) init(dao *dao.Simple) error {
	h.dao = dao
	h.cache, _ = lru.New[uint32, []util.Uint256](pagesCache) // Never errors for positive size.

	currHeaderHeight, currHeaderHash, err := h.dao.GetCurrentHeaderHeight()
	if err != nil {
		return fmt.Errorf("failed to retrieve current header info: %w", err)
	}
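	// Round the number of headers (currHeaderHeight+1) down to a multiple of
	// headerBatchCount, that's how many hashes are stored in complete pages.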
	h.storedHeaderCount = ((currHeaderHeight + 1) / headerBatchCount) * headerBatchCount

	if h.storedHeaderCount >= headerBatchCount {
		h.previous, err = h.dao.GetHeaderHashes(h.storedHeaderCount - headerBatchCount)
		if err != nil {
			return fmt.Errorf("failed to retrieve header hash page %d: %w", h.storedHeaderCount-headerBatchCount, err)
		}
	} else {
		h.previous = make([]util.Uint256, headerBatchCount)
	}
	h.latest = make([]util.Uint256, 0, headerBatchCount)

	// There is a high chance that the node was stopped before the next batch
	// of 2000 headers was stored, so the hashes between the last stored page
	// and the current header need to be restored from the stored headers.
	if currHeaderHeight >= h.storedHeaderCount {
		hash := currHeaderHash
		var targetHash util.Uint256
		if h.storedHeaderCount >= headerBatchCount {
			targetHash = h.previous[len(h.previous)-1]
		}
		headers := make([]util.Uint256, 0, headerBatchCount)

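		// Walk back from the current header to the last hash of the
		// previously stored page (or to the zero hash if no pages are
		// stored yet), collecting hashes along the way.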
		for hash != targetHash {
			blk, err := h.dao.GetBlock(hash)
			if err != nil {
				return fmt.Errorf("could not get header %s: %w", hash, err)
			}
			headers = append(headers, blk.Hash())
			hash = blk.PrevHash
		}
		hashSliceReverse(headers)
		h.latest = append(h.latest, headers...)
	}
	return nil
}

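// lastHeaderIndex returns the index of the latest known header. The caller
// must hold h.lock.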
func (h *HeaderHashes) lastHeaderIndex() uint32 {
	return h.storedHeaderCount + uint32(len(h.latest)) - 1
}

// HeaderHeight returns the index/height of the highest header.
func (h *HeaderHashes) HeaderHeight() uint32 {
	h.lock.RLock()
	n := h.lastHeaderIndex()
	h.lock.RUnlock()
	return n
}

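// addHeaders stores the given headers via a private DAO batch, keeping only
// those that directly extend the current header chain (others are skipped)
// and flushing every complete page of header hashes to the storage.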
func (h *HeaderHashes) addHeaders(headers ...*block.Header) error {
	var (
		batch      = h.dao.GetPrivate()
		lastHeader *block.Header
		err        error
	)

	h.lock.Lock()
	defer h.lock.Unlock()

	for _, head := range headers {
		if head.Index != h.lastHeaderIndex()+1 {
			continue
		}
		err = batch.StoreHeader(head)
		if err != nil {
			return err
		}
		lastHeader = head
		h.latest = append(h.latest, head.Hash())
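		// A complete page of hashes is collected, store it and start a new one.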
		if len(h.latest) == headerBatchCount {
			err = batch.StoreHeaderHashes(h.latest, h.storedHeaderCount)
			if err != nil {
				return err
			}
			copy(h.previous, h.latest)
			h.latest = h.latest[:0]
			h.storedHeaderCount += headerBatchCount
		}
	}
	if lastHeader != nil {
		batch.PutCurrentHeader(lastHeader.Hash(), lastHeader.Index)
		updateHeaderHeightMetric(lastHeader.Index)
		if _, err = batch.Persist(); err != nil {
			return err
		}
	}
	return nil
}

// CurrentHeaderHash returns the hash of the latest known header.
func (h *HeaderHashes) CurrentHeaderHash() util.Uint256 {
	var hash util.Uint256

	h.lock.RLock()
	if len(h.latest) > 0 {
		hash = h.latest[len(h.latest)-1]
	} else {
		hash = h.previous[len(h.previous)-1]
	}
	h.lock.RUnlock()
	return hash
}

// GetHeaderHash returns the hash of the header/block with the specified index.
// If HeaderHashes doesn't have a hash for this height, a zero Uint256 value is
// returned.
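// For example, the hash of the latest known header can be retrieved with
//
//	hash := h.GetHeaderHash(h.HeaderHeight())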
func (h *HeaderHashes) GetHeaderHash(i uint32) util.Uint256 {
	h.lock.RLock()
	res, ok := h.getLocalHeaderHash(i)
	h.lock.RUnlock()
	if ok {
		return res
	}
	// If it's not in the latest/previous, then it's in the cache or DB, those
	// need no additional locks.
	page := (i / headerBatchCount) * headerBatchCount
	hashes, ok := h.cache.Get(page)
	if ok {
		return hashes[i-page]
	}
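	// Cache miss, load the page from the DB and cache it for future lookups.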
	hashes, err := h.dao.GetHeaderHashes(page)
	if err != nil {
		return util.Uint256{}
	}
	_ = h.cache.Add(page, hashes)
	return hashes[i-page]
}

// getLocalHeaderHash looks for the index in the latest and previous caches.
// Locking is left to the user.
func (h *HeaderHashes) getLocalHeaderHash(i uint32) (util.Uint256, bool) {
	if i > h.lastHeaderIndex() {
		return util.Uint256{}, false
	}
	if i >= h.storedHeaderCount {
		return h.latest[i-h.storedHeaderCount], true
	}
	previousStored := h.storedHeaderCount - headerBatchCount
	if i >= previousStored {
		return h.previous[i-previousStored], true
	}
	return util.Uint256{}, false
}

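// haveRecentHash checks whether the given hash is among the locally cached
// (latest/previous) header hashes at heights from i down to 1.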
func (h *HeaderHashes) haveRecentHash(hash util.Uint256, i uint32) bool {
	h.lock.RLock()
	defer h.lock.RUnlock()
	for ; i > 0; i-- {
		lh, ok := h.getLocalHeaderHash(i)
		if ok && hash.Equals(lh) {
			return true
		}
	}
	return false
}