github.com/MetalBlockchain/metalgo@v1.11.9/vms/platformvm/state/state.go

     1  // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
     2  // See the file LICENSE for licensing terms.
     3  
     4  package state
     5  
     6  import (
     7  	"context"
     8  	"errors"
     9  	"fmt"
    10  	"math"
    11  	"sync"
    12  	"time"
    13  
    14  	"github.com/google/btree"
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"go.uber.org/zap"
    17  
    18  	"github.com/MetalBlockchain/metalgo/cache"
    19  	"github.com/MetalBlockchain/metalgo/cache/metercacher"
    20  	"github.com/MetalBlockchain/metalgo/database"
    21  	"github.com/MetalBlockchain/metalgo/database/linkeddb"
    22  	"github.com/MetalBlockchain/metalgo/database/prefixdb"
    23  	"github.com/MetalBlockchain/metalgo/database/versiondb"
    24  	"github.com/MetalBlockchain/metalgo/ids"
    25  	"github.com/MetalBlockchain/metalgo/snow"
    26  	"github.com/MetalBlockchain/metalgo/snow/choices"
    27  	"github.com/MetalBlockchain/metalgo/snow/uptime"
    28  	"github.com/MetalBlockchain/metalgo/snow/validators"
    29  	"github.com/MetalBlockchain/metalgo/utils/constants"
    30  	"github.com/MetalBlockchain/metalgo/utils/crypto/bls"
    31  	"github.com/MetalBlockchain/metalgo/utils/hashing"
    32  	"github.com/MetalBlockchain/metalgo/utils/logging"
    33  	"github.com/MetalBlockchain/metalgo/utils/timer"
    34  	"github.com/MetalBlockchain/metalgo/utils/wrappers"
    35  	"github.com/MetalBlockchain/metalgo/vms/components/avax"
    36  	"github.com/MetalBlockchain/metalgo/vms/platformvm/block"
    37  	"github.com/MetalBlockchain/metalgo/vms/platformvm/config"
    38  	"github.com/MetalBlockchain/metalgo/vms/platformvm/fx"
    39  	"github.com/MetalBlockchain/metalgo/vms/platformvm/genesis"
    40  	"github.com/MetalBlockchain/metalgo/vms/platformvm/metrics"
    41  	"github.com/MetalBlockchain/metalgo/vms/platformvm/reward"
    42  	"github.com/MetalBlockchain/metalgo/vms/platformvm/status"
    43  	"github.com/MetalBlockchain/metalgo/vms/platformvm/txs"
    44  
    45  	safemath "github.com/MetalBlockchain/metalgo/utils/math"
    46  )
    47  
    48  const (
    49  	indexIterationLimit           = 4096
    50  	indexIterationSleepMultiplier = 5
    51  	indexIterationSleepCap        = 10 * time.Second
    52  	indexLogFrequency             = 30 * time.Second
    53  )
    54  
    55  var (
    56  	_ State = (*state)(nil)
    57  
    58  	errValidatorSetAlreadyPopulated = errors.New("validator set already populated")
    59  	errIsNotSubnet                  = errors.New("is not a subnet")
    60  
    61  	BlockIDPrefix                 = []byte("blockID")
    62  	BlockPrefix                   = []byte("block")
    63  	ValidatorsPrefix              = []byte("validators")
    64  	CurrentPrefix                 = []byte("current")
    65  	PendingPrefix                 = []byte("pending")
    66  	ValidatorPrefix               = []byte("validator")
    67  	DelegatorPrefix               = []byte("delegator")
    68  	SubnetValidatorPrefix         = []byte("subnetValidator")
    69  	SubnetDelegatorPrefix         = []byte("subnetDelegator")
    70  	ValidatorWeightDiffsPrefix    = []byte("flatValidatorDiffs")
    71  	ValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs")
    72  	TxPrefix                      = []byte("tx")
    73  	RewardUTXOsPrefix             = []byte("rewardUTXOs")
    74  	UTXOPrefix                    = []byte("utxo")
    75  	SubnetPrefix                  = []byte("subnet")
    76  	SubnetOwnerPrefix             = []byte("subnetOwner")
    77  	TransformedSubnetPrefix       = []byte("transformedSubnet")
    78  	SupplyPrefix                  = []byte("supply")
    79  	ChainPrefix                   = []byte("chain")
    80  	SingletonPrefix               = []byte("singleton")
    81  
    82  	TimestampKey       = []byte("timestamp")
    83  	CurrentSupplyKey   = []byte("current supply")
    84  	LastAcceptedKey    = []byte("last accepted")
    85  	HeightsIndexedKey  = []byte("heights indexed")
    86  	InitializedKey     = []byte("initialized")
    87  	BlocksReindexedKey = []byte("blocks reindexed")
    88  )
    89  
    90  // Chain collects all methods to manage the state of the chain for block
    91  // execution.
    92  type Chain interface {
    93  	Stakers
    94  	avax.UTXOAdder
    95  	avax.UTXOGetter
    96  	avax.UTXODeleter
    97  
    98  	GetTimestamp() time.Time
    99  	SetTimestamp(tm time.Time)
   100  
   101  	GetCurrentSupply(subnetID ids.ID) (uint64, error)
   102  	SetCurrentSupply(subnetID ids.ID, cs uint64)
   103  
   104  	AddRewardUTXO(txID ids.ID, utxo *avax.UTXO)
   105  
   106  	AddSubnet(subnetID ids.ID)
   107  
   108  	GetSubnetOwner(subnetID ids.ID) (fx.Owner, error)
   109  	SetSubnetOwner(subnetID ids.ID, owner fx.Owner)
   110  
   111  	GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error)
   112  	AddSubnetTransformation(transformSubnetTx *txs.Tx)
   113  
   114  	AddChain(createChainTx *txs.Tx)
   115  
   116  	GetTx(txID ids.ID) (*txs.Tx, status.Status, error)
   117  	AddTx(tx *txs.Tx, status status.Status)
   118  }
   119  
   120  type State interface {
   121  	Chain
   122  	uptime.State
   123  	avax.UTXOReader
   124  
   125  	GetLastAccepted() ids.ID
   126  	SetLastAccepted(blkID ids.ID)
   127  
   128  	GetStatelessBlock(blockID ids.ID) (block.Block, error)
   129  
   130  	// Invariant: [block] is an accepted block.
   131  	AddStatelessBlock(block block.Block)
   132  
   133  	GetBlockIDAtHeight(height uint64) (ids.ID, error)
   134  
   135  	GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error)
   136  	GetSubnetIDs() ([]ids.ID, error)
   137  	GetChains(subnetID ids.ID) ([]*txs.Tx, error)
   138  
   139  	// ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis
   140  	// block until it has applied all of the diffs up to and including
   141  	// [endHeight]. Applying the diffs modifies [validators].
   142  	//
   143  	// Invariant: If attempting to generate the validator set for
   144  	// [endHeight - 1], [validators] must initially contain the validator
   145  	// weights for [startHeight].
   146  	//
   147  	// Note: Because this function iterates towards the genesis, [startHeight]
   148  	// will typically be greater than or equal to [endHeight]. If [startHeight]
   149  	// is less than [endHeight], no diffs will be applied.
   150  	ApplyValidatorWeightDiffs(
   151  		ctx context.Context,
   152  		validators map[ids.NodeID]*validators.GetValidatorOutput,
   153  		startHeight uint64,
   154  		endHeight uint64,
   155  		subnetID ids.ID,
   156  	) error
   157  
   158  	// ApplyValidatorPublicKeyDiffs iterates from [startHeight] towards the
   159  	// genesis block until it has applied all of the diffs up to and including
   160  	// [endHeight]. Applying the diffs modifies [validators].
   161  	//
   162  	// Invariant: If attempting to generate the validator set for
   163  	// [endHeight - 1], [validators] must initially contain the validator
   164  	// weights for [startHeight].
   165  	//
   166  	// Note: Because this function iterates towards the genesis, [startHeight]
   167  	// will typically be greater than or equal to [endHeight]. If [startHeight]
   168  	// is less than [endHeight], no diffs will be applied.
   169  	ApplyValidatorPublicKeyDiffs(
   170  		ctx context.Context,
   171  		validators map[ids.NodeID]*validators.GetValidatorOutput,
   172  		startHeight uint64,
   173  		endHeight uint64,
   174  	) error
   175  
   176  	SetHeight(height uint64)
   177  
   178  	// Discard uncommitted changes to the database.
   179  	Abort()
   180  
   181  	// ReindexBlocks converts any block indices using the legacy storage format
   182  	// to the new format. If this database has already updated the indices,
   183  	// this function will return immediately, without iterating over the
   184  	// database.
   185  	//
   186  	// TODO: Remove after v1.12.x is activated
   187  	ReindexBlocks(lock sync.Locker, log logging.Logger) error
   188  
   189  	// Commit changes to the base database.
   190  	Commit() error
   191  
   192  	// Returns a batch of unwritten changes that, when written, will commit all
   193  	// pending changes to the base database.
   194  	CommitBatch() (database.Batch, error)
   195  
   196  	Checksum() ids.ID
   197  
   198  	Close() error
   199  }
   200  
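// validatorSetAt is a hypothetical helper (not part of this file) sketching
// how a caller can use the two diff-application methods together to
// reconstruct the primary network validator set at [targetHeight]. Per the
// invariants documented on [State] above, [vdrs] must start as a copy of the
// validator set at [currentHeight]; passing endHeight = targetHeight+1 leaves
// [vdrs] holding the weights and public keys as of [targetHeight].
func validatorSetAt(
	ctx context.Context,
	s State,
	vdrs map[ids.NodeID]*validators.GetValidatorOutput,
	currentHeight uint64,
	targetHeight uint64,
) error {
	if err := s.ApplyValidatorWeightDiffs(ctx, vdrs, currentHeight, targetHeight+1, constants.PrimaryNetworkID); err != nil {
		return err
	}
	return s.ApplyValidatorPublicKeyDiffs(ctx, vdrs, currentHeight, targetHeight+1)
}
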
   201  // Prior to https://github.com/MetalBlockchain/metalgo/pull/1719, blocks were
   202  // stored as a map from blkID to stateBlk. Nodes synced prior to this PR may
   203  // still have blocks partially stored using this legacy format.
   204  //
   205  // TODO: Remove after v1.12.x is activated
   206  type stateBlk struct {
   207  	Bytes  []byte         `serialize:"true"`
   208  	Status choices.Status `serialize:"true"`
   209  }
   210  
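// parseStoredLegacyBlock is a hypothetical sketch of how a stored entry can
// be interpreted under either format, assuming entries were written with
// [block.GenesisCodec] (the actual migration is performed by ReindexBlocks):
func parseStoredLegacyBlock(blkBytes []byte) (block.Block, error) {
	// Modern format: the value is the raw block bytes themselves.
	if blk, err := block.Parse(block.GenesisCodec, blkBytes); err == nil {
		return blk, nil
	}
	// Legacy format: the value is a stateBlk wrapper around the block bytes.
	blkState := stateBlk{}
	if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil {
		return nil, err
	}
	return block.Parse(block.GenesisCodec, blkState.Bytes)
}
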
   211  /*
   212   * VMDB
   213   * |-. validators
   214   * | |-. current
   215   * | | |-. validator
   216   * | | | '-. list
   217   * | | |   '-- txID -> uptime + potential reward + potential delegatee reward
   218   * | | |-. delegator
   219   * | | | '-. list
   220   * | | |   '-- txID -> potential reward
   221   * | | |-. subnetValidator
   222   * | | | '-. list
   223   * | | |   '-- txID -> uptime + potential reward + potential delegatee reward
   224   * | | '-. subnetDelegator
   225   * | |   '-. list
   226   * | |     '-- txID -> potential reward
   227   * | |-. pending
   228   * | | |-. validator
   229   * | | | '-. list
   230   * | | |   '-- txID -> nil
   231   * | | |-. delegator
   232   * | | | '-. list
   233   * | | |   '-- txID -> nil
   234   * | | |-. subnetValidator
   235   * | | | '-. list
   236   * | | |   '-- txID -> nil
   237   * | | '-. subnetDelegator
   238   * | |   '-. list
   239   * | |     '-- txID -> nil
   240   * | |-. weight diffs
   241   * | | '-- subnet+height+nodeID -> weightChange
   242   * | '-. pub key diffs
   243   * |   '-- subnet+height+nodeID -> uncompressed public key or nil
   244   * |-. blockIDs
   245   * | '-- height -> blockID
   246   * |-. blocks
   247   * | '-- blockID -> block bytes
   248   * |-. txs
   249   * | '-- txID -> tx bytes + tx status
   250   * |- rewardUTXOs
   251   * | '-. txID
   252   * |   '-. list
   253   * |     '-- utxoID -> utxo bytes
   254   * |- utxos
   255   * | '-- utxoDB
   256   * |-. subnets
   257   * | '-. list
   258   * |   '-- txID -> nil
   259   * |-. subnetOwners
   260   * | '-. subnetID -> owner
   261   * |-. chains
   262   * | '-. subnetID
   263   * |   '-. list
   264   * |     '-- txID -> nil
   265   * '-. singletons
   266   *   |-- initializedKey -> nil
   267   *   |-- blocksReindexedKey -> nil
   268   *   |-- timestampKey -> timestamp
   269   *   |-- currentSupplyKey -> currentSupply
   270   *   |-- lastAcceptedKey -> lastAccepted
    271   *   '-- heightsIndexedKey -> startIndexHeight + endIndexHeight
   272   */
   273  type state struct {
   274  	validatorState
   275  
   276  	validators validators.Manager
   277  	ctx        *snow.Context
   278  	cfg        *config.Config
   279  	metrics    metrics.Metrics
   280  	rewards    reward.Calculator
   281  
   282  	baseDB *versiondb.Database
   283  
   284  	currentStakers *baseStakers
   285  	pendingStakers *baseStakers
   286  
   287  	currentHeight uint64
   288  
   289  	addedBlockIDs map[uint64]ids.ID            // map of height -> blockID
   290  	blockIDCache  cache.Cacher[uint64, ids.ID] // cache of height -> blockID; if the entry is ids.Empty, it is not in the database
   291  	blockIDDB     database.Database
   292  
   293  	addedBlocks map[ids.ID]block.Block            // map of blockID -> Block
   294  	blockCache  cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block; if the entry is nil, it is not in the database
   295  	blockDB     database.Database
   296  
   297  	validatorsDB                 database.Database
   298  	currentValidatorsDB          database.Database
   299  	currentValidatorBaseDB       database.Database
   300  	currentValidatorList         linkeddb.LinkedDB
   301  	currentDelegatorBaseDB       database.Database
   302  	currentDelegatorList         linkeddb.LinkedDB
   303  	currentSubnetValidatorBaseDB database.Database
   304  	currentSubnetValidatorList   linkeddb.LinkedDB
   305  	currentSubnetDelegatorBaseDB database.Database
   306  	currentSubnetDelegatorList   linkeddb.LinkedDB
   307  	pendingValidatorsDB          database.Database
   308  	pendingValidatorBaseDB       database.Database
   309  	pendingValidatorList         linkeddb.LinkedDB
   310  	pendingDelegatorBaseDB       database.Database
   311  	pendingDelegatorList         linkeddb.LinkedDB
   312  	pendingSubnetValidatorBaseDB database.Database
   313  	pendingSubnetValidatorList   linkeddb.LinkedDB
   314  	pendingSubnetDelegatorBaseDB database.Database
   315  	pendingSubnetDelegatorList   linkeddb.LinkedDB
   316  
   317  	validatorWeightDiffsDB    database.Database
   318  	validatorPublicKeyDiffsDB database.Database
   319  
   320  	addedTxs map[ids.ID]*txAndStatus            // map of txID -> {*txs.Tx, Status}
   321  	txCache  cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}; if the entry is nil, it is not in the database
   322  	txDB     database.Database
   323  
   324  	addedRewardUTXOs map[ids.ID][]*avax.UTXO            // map of txID -> []*UTXO
   325  	rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO
   326  	rewardUTXODB     database.Database
   327  
   328  	modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO; if the UTXO is nil, it has been removed
   329  	utxoDB        database.Database
   330  	utxoState     avax.UTXOState
   331  
   332  	cachedSubnetIDs []ids.ID // nil if the subnets haven't been loaded
   333  	addedSubnetIDs  []ids.ID
   334  	subnetBaseDB    database.Database
   335  	subnetDB        linkeddb.LinkedDB
   336  
   337  	subnetOwners     map[ids.ID]fx.Owner                  // map of subnetID -> owner
   338  	subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner; if the entry is nil, it is not in the database
   339  	subnetOwnerDB    database.Database
   340  
   341  	transformedSubnets     map[ids.ID]*txs.Tx            // map of subnetID -> transformSubnetTx
   342  	transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx; if the entry is nil, it is not in the database
   343  	transformedSubnetDB    database.Database
   344  
   345  	modifiedSupplies map[ids.ID]uint64             // map of subnetID -> current supply
   346  	supplyCache      cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply; if the entry is nil, it is not in the database
   347  	supplyDB         database.Database
   348  
   349  	addedChains  map[ids.ID][]*txs.Tx                    // maps subnetID -> the newly added chains to the subnet
   350  	chainCache   cache.Cacher[ids.ID, []*txs.Tx]         // cache of subnetID -> the chains after all local modifications []*txs.Tx
   351  	chainDBCache cache.Cacher[ids.ID, linkeddb.LinkedDB] // cache of subnetID -> linkedDB
   352  	chainDB      database.Database
   353  
   354  	// The persisted fields represent the current database value
   355  	timestamp, persistedTimestamp         time.Time
   356  	currentSupply, persistedCurrentSupply uint64
   357  	// [lastAccepted] is the most recently accepted block.
   358  	lastAccepted, persistedLastAccepted ids.ID
   359  	// TODO: Remove indexedHeights once v1.11.3 has been released.
   360  	indexedHeights *heightRange
   361  	singletonDB    database.Database
   362  }
   363  
   364  // heightRange is used to track which heights are safe to use the native DB
   365  // iterator for querying validator diffs.
   366  //
    367  // TODO: Remove once we are guaranteed nodes cannot roll back to a version
    368  // that does not support the new indexing mechanism.
   369  type heightRange struct {
   370  	LowerBound uint64 `serialize:"true"`
   371  	UpperBound uint64 `serialize:"true"`
   372  }
   373  
   374  type ValidatorWeightDiff struct {
   375  	Decrease bool   `serialize:"true"`
   376  	Amount   uint64 `serialize:"true"`
   377  }
   378  
   379  func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error {
   380  	if v.Decrease == negative {
   381  		var err error
   382  		v.Amount, err = safemath.Add64(v.Amount, amount)
   383  		return err
   384  	}
   385  
   386  	if v.Amount > amount {
   387  		v.Amount -= amount
   388  	} else {
   389  		v.Amount = safemath.AbsDiff(v.Amount, amount)
   390  		v.Decrease = negative
   391  	}
   392  	return nil
   393  }
   394  
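// Worked example (illustrative): merging two per-block weight changes for one
// validator. Starting from the zero value, an increase of 100 followed by a
// decrease of 140 nets out to a decrease of 40:
//
//	d := &ValidatorWeightDiff{}
//	_ = d.Add(false, 100) // d = {Decrease: false, Amount: 100}
//	_ = d.Add(true, 140)  // d = {Decrease: true, Amount: 40}
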
   395  type txBytesAndStatus struct {
   396  	Tx     []byte        `serialize:"true"`
   397  	Status status.Status `serialize:"true"`
   398  }
   399  
   400  type txAndStatus struct {
   401  	tx     *txs.Tx
   402  	status status.Status
   403  }
   404  
   405  type fxOwnerAndSize struct {
   406  	owner fx.Owner
   407  	size  int
   408  }
   409  
   410  func txSize(_ ids.ID, tx *txs.Tx) int {
   411  	if tx == nil {
   412  		return ids.IDLen + constants.PointerOverhead
   413  	}
   414  	return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead
   415  }
   416  
   417  func txAndStatusSize(_ ids.ID, t *txAndStatus) int {
   418  	if t == nil {
   419  		return ids.IDLen + constants.PointerOverhead
   420  	}
   421  	return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead
   422  }
   423  
   424  func blockSize(_ ids.ID, blk block.Block) int {
   425  	if blk == nil {
   426  		return ids.IDLen + constants.PointerOverhead
   427  	}
   428  	return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead
   429  }
   430  
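// The three size callbacks above let the sized LRU caches constructed below
// bound memory by approximate byte footprint instead of entry count. A
// minimal sketch, assuming a cap expressed in bytes:
//
//	blkCache := cache.NewSizedLRU[ids.ID, block.Block](maxSizeBytes, blockSize)
//	blkCache.Put(blkID, blk) // evicts older entries once the cap is exceeded
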
   431  func New(
   432  	db database.Database,
   433  	genesisBytes []byte,
   434  	metricsReg prometheus.Registerer,
   435  	cfg *config.Config,
   436  	execCfg *config.ExecutionConfig,
   437  	ctx *snow.Context,
   438  	metrics metrics.Metrics,
   439  	rewards reward.Calculator,
   440  ) (State, error) {
   441  	s, err := newState(
   442  		db,
   443  		metrics,
   444  		cfg,
   445  		execCfg,
   446  		ctx,
   447  		metricsReg,
   448  		rewards,
   449  	)
   450  	if err != nil {
   451  		return nil, err
   452  	}
   453  
   454  	if err := s.sync(genesisBytes); err != nil {
   455  		// Drop any errors on close to return the first error
   456  		_ = s.Close()
   457  
   458  		return nil, err
   459  	}
   460  
   461  	return s, nil
   462  }
   463  
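// Minimal construction sketch (hypothetical; an in-memory database is assumed
// to stand in for a real one, and the remaining dependencies come from the VM):
//
//	st, err := New(memdb.New(), genesisBytes, prometheus.NewRegistry(),
//		cfg, execCfg, ctx, metricsImpl, rewardCalculator)
//	if err != nil {
//		return err
//	}
//	defer st.Close()
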
   464  func newState(
   465  	db database.Database,
   466  	metrics metrics.Metrics,
   467  	cfg *config.Config,
   468  	execCfg *config.ExecutionConfig,
   469  	ctx *snow.Context,
   470  	metricsReg prometheus.Registerer,
   471  	rewards reward.Calculator,
   472  ) (*state, error) {
   473  	blockIDCache, err := metercacher.New[uint64, ids.ID](
   474  		"block_id_cache",
   475  		metricsReg,
   476  		&cache.LRU[uint64, ids.ID]{Size: execCfg.BlockIDCacheSize},
   477  	)
   478  	if err != nil {
   479  		return nil, err
   480  	}
   481  
   482  	blockCache, err := metercacher.New[ids.ID, block.Block](
   483  		"block_cache",
   484  		metricsReg,
   485  		cache.NewSizedLRU[ids.ID, block.Block](execCfg.BlockCacheSize, blockSize),
   486  	)
   487  	if err != nil {
   488  		return nil, err
   489  	}
   490  
   491  	baseDB := versiondb.New(db)
   492  
   493  	validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB)
   494  
   495  	currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB)
   496  	currentValidatorBaseDB := prefixdb.New(ValidatorPrefix, currentValidatorsDB)
   497  	currentDelegatorBaseDB := prefixdb.New(DelegatorPrefix, currentValidatorsDB)
   498  	currentSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, currentValidatorsDB)
   499  	currentSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, currentValidatorsDB)
   500  
   501  	pendingValidatorsDB := prefixdb.New(PendingPrefix, validatorsDB)
   502  	pendingValidatorBaseDB := prefixdb.New(ValidatorPrefix, pendingValidatorsDB)
   503  	pendingDelegatorBaseDB := prefixdb.New(DelegatorPrefix, pendingValidatorsDB)
   504  	pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB)
   505  	pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB)
   506  
   507  	validatorWeightDiffsDB := prefixdb.New(ValidatorWeightDiffsPrefix, validatorsDB)
   508  	validatorPublicKeyDiffsDB := prefixdb.New(ValidatorPublicKeyDiffsPrefix, validatorsDB)
   509  
   510  	txCache, err := metercacher.New(
   511  		"tx_cache",
   512  		metricsReg,
   513  		cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize),
   514  	)
   515  	if err != nil {
   516  		return nil, err
   517  	}
   518  
   519  	rewardUTXODB := prefixdb.New(RewardUTXOsPrefix, baseDB)
   520  	rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO](
   521  		"reward_utxos_cache",
   522  		metricsReg,
   523  		&cache.LRU[ids.ID, []*avax.UTXO]{Size: execCfg.RewardUTXOsCacheSize},
   524  	)
   525  	if err != nil {
   526  		return nil, err
   527  	}
   528  
   529  	utxoDB := prefixdb.New(UTXOPrefix, baseDB)
   530  	utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg, execCfg.ChecksumsEnabled)
   531  	if err != nil {
   532  		return nil, err
   533  	}
   534  
   535  	subnetBaseDB := prefixdb.New(SubnetPrefix, baseDB)
   536  
   537  	subnetOwnerDB := prefixdb.New(SubnetOwnerPrefix, baseDB)
   538  	subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize](
   539  		"subnet_owner_cache",
   540  		metricsReg,
   541  		cache.NewSizedLRU[ids.ID, fxOwnerAndSize](execCfg.FxOwnerCacheSize, func(_ ids.ID, f fxOwnerAndSize) int {
   542  			return ids.IDLen + f.size
   543  		}),
   544  	)
   545  	if err != nil {
   546  		return nil, err
   547  	}
   548  
   549  	transformedSubnetCache, err := metercacher.New(
   550  		"transformed_subnet_cache",
   551  		metricsReg,
   552  		cache.NewSizedLRU[ids.ID, *txs.Tx](execCfg.TransformedSubnetTxCacheSize, txSize),
   553  	)
   554  	if err != nil {
   555  		return nil, err
   556  	}
   557  
   558  	supplyCache, err := metercacher.New[ids.ID, *uint64](
   559  		"supply_cache",
   560  		metricsReg,
   561  		&cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize},
   562  	)
   563  	if err != nil {
   564  		return nil, err
   565  	}
   566  
   567  	chainCache, err := metercacher.New[ids.ID, []*txs.Tx](
   568  		"chain_cache",
   569  		metricsReg,
   570  		&cache.LRU[ids.ID, []*txs.Tx]{Size: execCfg.ChainCacheSize},
   571  	)
   572  	if err != nil {
   573  		return nil, err
   574  	}
   575  
   576  	chainDBCache, err := metercacher.New[ids.ID, linkeddb.LinkedDB](
   577  		"chain_db_cache",
   578  		metricsReg,
   579  		&cache.LRU[ids.ID, linkeddb.LinkedDB]{Size: execCfg.ChainDBCacheSize},
   580  	)
   581  	if err != nil {
   582  		return nil, err
   583  	}
   584  
   585  	return &state{
   586  		validatorState: newValidatorState(),
   587  
   588  		validators: cfg.Validators,
   589  		ctx:        ctx,
   590  		cfg:        cfg,
   591  		metrics:    metrics,
   592  		rewards:    rewards,
   593  		baseDB:     baseDB,
   594  
   595  		addedBlockIDs: make(map[uint64]ids.ID),
   596  		blockIDCache:  blockIDCache,
   597  		blockIDDB:     prefixdb.New(BlockIDPrefix, baseDB),
   598  
   599  		addedBlocks: make(map[ids.ID]block.Block),
   600  		blockCache:  blockCache,
   601  		blockDB:     prefixdb.New(BlockPrefix, baseDB),
   602  
   603  		currentStakers: newBaseStakers(),
   604  		pendingStakers: newBaseStakers(),
   605  
   606  		validatorsDB:                 validatorsDB,
   607  		currentValidatorsDB:          currentValidatorsDB,
   608  		currentValidatorBaseDB:       currentValidatorBaseDB,
   609  		currentValidatorList:         linkeddb.NewDefault(currentValidatorBaseDB),
   610  		currentDelegatorBaseDB:       currentDelegatorBaseDB,
   611  		currentDelegatorList:         linkeddb.NewDefault(currentDelegatorBaseDB),
   612  		currentSubnetValidatorBaseDB: currentSubnetValidatorBaseDB,
   613  		currentSubnetValidatorList:   linkeddb.NewDefault(currentSubnetValidatorBaseDB),
   614  		currentSubnetDelegatorBaseDB: currentSubnetDelegatorBaseDB,
   615  		currentSubnetDelegatorList:   linkeddb.NewDefault(currentSubnetDelegatorBaseDB),
   616  		pendingValidatorsDB:          pendingValidatorsDB,
   617  		pendingValidatorBaseDB:       pendingValidatorBaseDB,
   618  		pendingValidatorList:         linkeddb.NewDefault(pendingValidatorBaseDB),
   619  		pendingDelegatorBaseDB:       pendingDelegatorBaseDB,
   620  		pendingDelegatorList:         linkeddb.NewDefault(pendingDelegatorBaseDB),
   621  		pendingSubnetValidatorBaseDB: pendingSubnetValidatorBaseDB,
   622  		pendingSubnetValidatorList:   linkeddb.NewDefault(pendingSubnetValidatorBaseDB),
   623  		pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB,
   624  		pendingSubnetDelegatorList:   linkeddb.NewDefault(pendingSubnetDelegatorBaseDB),
   625  		validatorWeightDiffsDB:       validatorWeightDiffsDB,
   626  		validatorPublicKeyDiffsDB:    validatorPublicKeyDiffsDB,
   627  
   628  		addedTxs: make(map[ids.ID]*txAndStatus),
   629  		txDB:     prefixdb.New(TxPrefix, baseDB),
   630  		txCache:  txCache,
   631  
   632  		addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO),
   633  		rewardUTXODB:     rewardUTXODB,
   634  		rewardUTXOsCache: rewardUTXOsCache,
   635  
   636  		modifiedUTXOs: make(map[ids.ID]*avax.UTXO),
   637  		utxoDB:        utxoDB,
   638  		utxoState:     utxoState,
   639  
   640  		subnetBaseDB: subnetBaseDB,
   641  		subnetDB:     linkeddb.NewDefault(subnetBaseDB),
   642  
   643  		subnetOwners:     make(map[ids.ID]fx.Owner),
   644  		subnetOwnerDB:    subnetOwnerDB,
   645  		subnetOwnerCache: subnetOwnerCache,
   646  
   647  		transformedSubnets:     make(map[ids.ID]*txs.Tx),
   648  		transformedSubnetCache: transformedSubnetCache,
   649  		transformedSubnetDB:    prefixdb.New(TransformedSubnetPrefix, baseDB),
   650  
   651  		modifiedSupplies: make(map[ids.ID]uint64),
   652  		supplyCache:      supplyCache,
   653  		supplyDB:         prefixdb.New(SupplyPrefix, baseDB),
   654  
   655  		addedChains:  make(map[ids.ID][]*txs.Tx),
   656  		chainDB:      prefixdb.New(ChainPrefix, baseDB),
   657  		chainCache:   chainCache,
   658  		chainDBCache: chainDBCache,
   659  
   660  		singletonDB: prefixdb.New(SingletonPrefix, baseDB),
   661  	}, nil
   662  }
   663  
   664  func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) {
   665  	return s.currentStakers.GetValidator(subnetID, nodeID)
   666  }
   667  
   668  func (s *state) PutCurrentValidator(staker *Staker) {
   669  	s.currentStakers.PutValidator(staker)
   670  }
   671  
   672  func (s *state) DeleteCurrentValidator(staker *Staker) {
   673  	s.currentStakers.DeleteValidator(staker)
   674  }
   675  
   676  func (s *state) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) {
   677  	return s.currentStakers.GetDelegatorIterator(subnetID, nodeID), nil
   678  }
   679  
   680  func (s *state) PutCurrentDelegator(staker *Staker) {
   681  	s.currentStakers.PutDelegator(staker)
   682  }
   683  
   684  func (s *state) DeleteCurrentDelegator(staker *Staker) {
   685  	s.currentStakers.DeleteDelegator(staker)
   686  }
   687  
   688  func (s *state) GetCurrentStakerIterator() (StakerIterator, error) {
   689  	return s.currentStakers.GetStakerIterator(), nil
   690  }
   691  
   692  func (s *state) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) {
   693  	return s.pendingStakers.GetValidator(subnetID, nodeID)
   694  }
   695  
   696  func (s *state) PutPendingValidator(staker *Staker) {
   697  	s.pendingStakers.PutValidator(staker)
   698  }
   699  
   700  func (s *state) DeletePendingValidator(staker *Staker) {
   701  	s.pendingStakers.DeleteValidator(staker)
   702  }
   703  
   704  func (s *state) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) {
   705  	return s.pendingStakers.GetDelegatorIterator(subnetID, nodeID), nil
   706  }
   707  
   708  func (s *state) PutPendingDelegator(staker *Staker) {
   709  	s.pendingStakers.PutDelegator(staker)
   710  }
   711  
   712  func (s *state) DeletePendingDelegator(staker *Staker) {
   713  	s.pendingStakers.DeleteDelegator(staker)
   714  }
   715  
   716  func (s *state) GetPendingStakerIterator() (StakerIterator, error) {
   717  	return s.pendingStakers.GetStakerIterator(), nil
   718  }
   719  
   720  func (s *state) shouldInit() (bool, error) {
   721  	has, err := s.singletonDB.Has(InitializedKey)
   722  	return !has, err
   723  }
   724  
   725  func (s *state) doneInit() error {
   726  	return s.singletonDB.Put(InitializedKey, nil)
   727  }
   728  
   729  func (s *state) GetSubnetIDs() ([]ids.ID, error) {
   730  	if s.cachedSubnetIDs != nil {
   731  		return s.cachedSubnetIDs, nil
   732  	}
   733  
   734  	subnetDBIt := s.subnetDB.NewIterator()
   735  	defer subnetDBIt.Release()
   736  
   737  	subnetIDs := []ids.ID{}
   738  	for subnetDBIt.Next() {
   739  		subnetIDBytes := subnetDBIt.Key()
   740  		subnetID, err := ids.ToID(subnetIDBytes)
   741  		if err != nil {
   742  			return nil, err
   743  		}
   744  		subnetIDs = append(subnetIDs, subnetID)
   745  	}
   746  	if err := subnetDBIt.Error(); err != nil {
   747  		return nil, err
   748  	}
   749  	subnetIDs = append(subnetIDs, s.addedSubnetIDs...)
   750  	s.cachedSubnetIDs = subnetIDs
   751  	return subnetIDs, nil
   752  }
   753  
   754  func (s *state) AddSubnet(subnetID ids.ID) {
   755  	s.addedSubnetIDs = append(s.addedSubnetIDs, subnetID)
   756  	if s.cachedSubnetIDs != nil {
   757  		s.cachedSubnetIDs = append(s.cachedSubnetIDs, subnetID)
   758  	}
   759  }
   760  
   761  func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) {
   762  	if owner, exists := s.subnetOwners[subnetID]; exists {
   763  		return owner, nil
   764  	}
   765  
   766  	if ownerAndSize, cached := s.subnetOwnerCache.Get(subnetID); cached {
   767  		if ownerAndSize.owner == nil {
   768  			return nil, database.ErrNotFound
   769  		}
   770  		return ownerAndSize.owner, nil
   771  	}
   772  
   773  	ownerBytes, err := s.subnetOwnerDB.Get(subnetID[:])
   774  	if err == nil {
   775  		var owner fx.Owner
   776  		if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil {
   777  			return nil, err
   778  		}
   779  		s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{
   780  			owner: owner,
   781  			size:  len(ownerBytes),
   782  		})
   783  		return owner, nil
   784  	}
   785  	if err != database.ErrNotFound {
   786  		return nil, err
   787  	}
   788  
   789  	subnetIntf, _, err := s.GetTx(subnetID)
   790  	if err != nil {
   791  		if err == database.ErrNotFound {
   792  			s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{})
   793  		}
   794  		return nil, err
   795  	}
   796  
   797  	subnet, ok := subnetIntf.Unsigned.(*txs.CreateSubnetTx)
   798  	if !ok {
   799  		return nil, fmt.Errorf("%q %w", subnetID, errIsNotSubnet)
   800  	}
   801  
   802  	s.SetSubnetOwner(subnetID, subnet.Owner)
   803  	return subnet.Owner, nil
   804  }
   805  
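// Call-site sketch (illustrative): a miss is negatively cached above and
// surfaced as database.ErrNotFound, so callers should branch on that error
// explicitly rather than treating every error as fatal:
//
//	owner, err := s.GetSubnetOwner(subnetID)
//	switch {
//	case err == nil:
//		// use owner
//	case errors.Is(err, database.ErrNotFound):
//		// no such subnet
//	default:
//		// database failure
//	}
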
   806  func (s *state) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) {
   807  	s.subnetOwners[subnetID] = owner
   808  }
   809  
   810  func (s *state) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) {
   811  	if tx, exists := s.transformedSubnets[subnetID]; exists {
   812  		return tx, nil
   813  	}
   814  
   815  	if tx, cached := s.transformedSubnetCache.Get(subnetID); cached {
   816  		if tx == nil {
   817  			return nil, database.ErrNotFound
   818  		}
   819  		return tx, nil
   820  	}
   821  
   822  	transformSubnetTxID, err := database.GetID(s.transformedSubnetDB, subnetID[:])
   823  	if err == database.ErrNotFound {
   824  		s.transformedSubnetCache.Put(subnetID, nil)
   825  		return nil, database.ErrNotFound
   826  	}
   827  	if err != nil {
   828  		return nil, err
   829  	}
   830  
   831  	transformSubnetTx, _, err := s.GetTx(transformSubnetTxID)
   832  	if err != nil {
   833  		return nil, err
   834  	}
   835  	s.transformedSubnetCache.Put(subnetID, transformSubnetTx)
   836  	return transformSubnetTx, nil
   837  }
   838  
   839  func (s *state) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) {
   840  	transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx)
   841  	s.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf
   842  }
   843  
   844  func (s *state) GetChains(subnetID ids.ID) ([]*txs.Tx, error) {
   845  	if chains, cached := s.chainCache.Get(subnetID); cached {
   846  		return chains, nil
   847  	}
   848  	chainDB := s.getChainDB(subnetID)
   849  	chainDBIt := chainDB.NewIterator()
   850  	defer chainDBIt.Release()
   851  
   852  	txs := []*txs.Tx(nil)
   853  	for chainDBIt.Next() {
   854  		chainIDBytes := chainDBIt.Key()
   855  		chainID, err := ids.ToID(chainIDBytes)
   856  		if err != nil {
   857  			return nil, err
   858  		}
   859  		chainTx, _, err := s.GetTx(chainID)
   860  		if err != nil {
   861  			return nil, err
   862  		}
   863  		txs = append(txs, chainTx)
   864  	}
   865  	if err := chainDBIt.Error(); err != nil {
   866  		return nil, err
   867  	}
   868  	txs = append(txs, s.addedChains[subnetID]...)
   869  	s.chainCache.Put(subnetID, txs)
   870  	return txs, nil
   871  }
   872  
   873  func (s *state) AddChain(createChainTxIntf *txs.Tx) {
   874  	createChainTx := createChainTxIntf.Unsigned.(*txs.CreateChainTx)
   875  	subnetID := createChainTx.SubnetID
   876  	s.addedChains[subnetID] = append(s.addedChains[subnetID], createChainTxIntf)
   877  	if chains, cached := s.chainCache.Get(subnetID); cached {
   878  		chains = append(chains, createChainTxIntf)
   879  		s.chainCache.Put(subnetID, chains)
   880  	}
   881  }
   882  
   883  func (s *state) getChainDB(subnetID ids.ID) linkeddb.LinkedDB {
   884  	if chainDB, cached := s.chainDBCache.Get(subnetID); cached {
   885  		return chainDB
   886  	}
   887  	rawChainDB := prefixdb.New(subnetID[:], s.chainDB)
   888  	chainDB := linkeddb.NewDefault(rawChainDB)
   889  	s.chainDBCache.Put(subnetID, chainDB)
   890  	return chainDB
   891  }
   892  
   893  func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) {
   894  	if tx, exists := s.addedTxs[txID]; exists {
   895  		return tx.tx, tx.status, nil
   896  	}
   897  	if tx, cached := s.txCache.Get(txID); cached {
   898  		if tx == nil {
   899  			return nil, status.Unknown, database.ErrNotFound
   900  		}
   901  		return tx.tx, tx.status, nil
   902  	}
   903  	txBytes, err := s.txDB.Get(txID[:])
   904  	if err == database.ErrNotFound {
   905  		s.txCache.Put(txID, nil)
   906  		return nil, status.Unknown, database.ErrNotFound
   907  	} else if err != nil {
   908  		return nil, status.Unknown, err
   909  	}
   910  
   911  	stx := txBytesAndStatus{}
   912  	if _, err := txs.GenesisCodec.Unmarshal(txBytes, &stx); err != nil {
   913  		return nil, status.Unknown, err
   914  	}
   915  
   916  	tx, err := txs.Parse(txs.GenesisCodec, stx.Tx)
   917  	if err != nil {
   918  		return nil, status.Unknown, err
   919  	}
   920  
   921  	ptx := &txAndStatus{
   922  		tx:     tx,
   923  		status: stx.Status,
   924  	}
   925  
   926  	s.txCache.Put(txID, ptx)
   927  	return ptx.tx, ptx.status, nil
   928  }
   929  
   930  func (s *state) AddTx(tx *txs.Tx, status status.Status) {
   931  	s.addedTxs[tx.ID()] = &txAndStatus{
   932  		tx:     tx,
   933  		status: status,
   934  	}
   935  }
   936  
   937  func (s *state) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) {
   938  	if utxos, exists := s.addedRewardUTXOs[txID]; exists {
   939  		return utxos, nil
   940  	}
   941  	if utxos, exists := s.rewardUTXOsCache.Get(txID); exists {
   942  		return utxos, nil
   943  	}
   944  
   945  	rawTxDB := prefixdb.New(txID[:], s.rewardUTXODB)
   946  	txDB := linkeddb.NewDefault(rawTxDB)
   947  	it := txDB.NewIterator()
   948  	defer it.Release()
   949  
   950  	utxos := []*avax.UTXO(nil)
   951  	for it.Next() {
   952  		utxo := &avax.UTXO{}
   953  		if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil {
   954  			return nil, err
   955  		}
   956  		utxos = append(utxos, utxo)
   957  	}
   958  	if err := it.Error(); err != nil {
   959  		return nil, err
   960  	}
   961  
   962  	s.rewardUTXOsCache.Put(txID, utxos)
   963  	return utxos, nil
   964  }
   965  
   966  func (s *state) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) {
   967  	s.addedRewardUTXOs[txID] = append(s.addedRewardUTXOs[txID], utxo)
   968  }
   969  
   970  func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) {
   971  	if utxo, exists := s.modifiedUTXOs[utxoID]; exists {
   972  		if utxo == nil {
   973  			return nil, database.ErrNotFound
   974  		}
   975  		return utxo, nil
   976  	}
   977  	return s.utxoState.GetUTXO(utxoID)
   978  }
   979  
   980  func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) {
   981  	return s.utxoState.UTXOIDs(addr, start, limit)
   982  }
   983  
   984  func (s *state) AddUTXO(utxo *avax.UTXO) {
   985  	s.modifiedUTXOs[utxo.InputID()] = utxo
   986  }
   987  
   988  func (s *state) DeleteUTXO(utxoID ids.ID) {
   989  	s.modifiedUTXOs[utxoID] = nil
   990  }
   991  
   992  func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) {
   993  	staker, err := s.currentStakers.GetValidator(subnetID, nodeID)
   994  	if err != nil {
   995  		return time.Time{}, err
   996  	}
   997  	return staker.StartTime, nil
   998  }
   999  
  1000  func (s *state) GetTimestamp() time.Time {
  1001  	return s.timestamp
  1002  }
  1003  
  1004  func (s *state) SetTimestamp(tm time.Time) {
  1005  	s.timestamp = tm
  1006  }
  1007  
  1008  func (s *state) GetLastAccepted() ids.ID {
  1009  	return s.lastAccepted
  1010  }
  1011  
  1012  func (s *state) SetLastAccepted(lastAccepted ids.ID) {
  1013  	s.lastAccepted = lastAccepted
  1014  }
  1015  
  1016  func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) {
  1017  	if subnetID == constants.PrimaryNetworkID {
  1018  		return s.currentSupply, nil
  1019  	}
  1020  
  1021  	supply, ok := s.modifiedSupplies[subnetID]
  1022  	if ok {
  1023  		return supply, nil
  1024  	}
  1025  
  1026  	cachedSupply, ok := s.supplyCache.Get(subnetID)
  1027  	if ok {
  1028  		if cachedSupply == nil {
  1029  			return 0, database.ErrNotFound
  1030  		}
  1031  		return *cachedSupply, nil
  1032  	}
  1033  
  1034  	supply, err := database.GetUInt64(s.supplyDB, subnetID[:])
  1035  	if err == database.ErrNotFound {
  1036  		s.supplyCache.Put(subnetID, nil)
  1037  		return 0, database.ErrNotFound
  1038  	}
  1039  	if err != nil {
  1040  		return 0, err
  1041  	}
  1042  
  1043  	s.supplyCache.Put(subnetID, &supply)
  1044  	return supply, nil
  1045  }
  1046  
  1047  func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) {
  1048  	if subnetID == constants.PrimaryNetworkID {
  1049  		s.currentSupply = cs
  1050  	} else {
  1051  		s.modifiedSupplies[subnetID] = cs
  1052  	}
  1053  }
  1054  
  1055  func (s *state) ApplyValidatorWeightDiffs(
  1056  	ctx context.Context,
  1057  	validators map[ids.NodeID]*validators.GetValidatorOutput,
  1058  	startHeight uint64,
  1059  	endHeight uint64,
  1060  	subnetID ids.ID,
  1061  ) error {
  1062  	diffIter := s.validatorWeightDiffsDB.NewIteratorWithStartAndPrefix(
  1063  		marshalStartDiffKey(subnetID, startHeight),
  1064  		subnetID[:],
  1065  	)
  1066  	defer diffIter.Release()
  1067  
  1068  	prevHeight := startHeight + 1
  1069  	for diffIter.Next() {
  1070  		if err := ctx.Err(); err != nil {
  1071  			return err
  1072  		}
  1073  
  1074  		_, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key())
  1075  		if err != nil {
  1076  			return err
  1077  		}
  1078  
  1079  		if parsedHeight > prevHeight {
  1080  			s.ctx.Log.Error("unexpected parsed height",
  1081  				zap.Stringer("subnetID", subnetID),
  1082  				zap.Uint64("parsedHeight", parsedHeight),
  1083  				zap.Stringer("nodeID", nodeID),
  1084  				zap.Uint64("prevHeight", prevHeight),
  1085  				zap.Uint64("startHeight", startHeight),
  1086  				zap.Uint64("endHeight", endHeight),
  1087  			)
  1088  		}
  1089  
  1090  		// If the parsedHeight is less than our target endHeight, then we have
  1091  		// fully processed the diffs from startHeight through endHeight.
  1092  		if parsedHeight < endHeight {
  1093  			return diffIter.Error()
  1094  		}
  1095  
  1096  		prevHeight = parsedHeight
  1097  
  1098  		weightDiff, err := unmarshalWeightDiff(diffIter.Value())
  1099  		if err != nil {
  1100  			return err
  1101  		}
  1102  
  1103  		if err := applyWeightDiff(validators, nodeID, weightDiff); err != nil {
  1104  			return err
  1105  		}
  1106  	}
  1107  	return diffIter.Error()
  1108  }
  1109  
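// Note on iteration order (assumed key encoding, defined alongside
// marshalStartDiffKey): diff keys are assumed to be laid out as
// subnetID ++ (math.MaxUint64 - height) ++ nodeID, so a forward iteration
// over the subnetID prefix visits heights in descending order. That is why
// the loop above starts at [startHeight] and naturally walks toward genesis.
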
  1110  func applyWeightDiff(
  1111  	vdrs map[ids.NodeID]*validators.GetValidatorOutput,
  1112  	nodeID ids.NodeID,
  1113  	weightDiff *ValidatorWeightDiff,
  1114  ) error {
  1115  	vdr, ok := vdrs[nodeID]
  1116  	if !ok {
  1117  		// This node isn't in the current validator set.
  1118  		vdr = &validators.GetValidatorOutput{
  1119  			NodeID: nodeID,
  1120  		}
  1121  		vdrs[nodeID] = vdr
  1122  	}
  1123  
  1124  	// The weight of this node changed at this block.
  1125  	var err error
  1126  	if weightDiff.Decrease {
  1127  		// The validator's weight was decreased at this block, so in the
  1128  		// prior block it was higher.
  1129  		vdr.Weight, err = safemath.Add64(vdr.Weight, weightDiff.Amount)
  1130  	} else {
  1131  		// The validator's weight was increased at this block, so in the
  1132  		// prior block it was lower.
  1133  		vdr.Weight, err = safemath.Sub(vdr.Weight, weightDiff.Amount)
  1134  	}
  1135  	if err != nil {
  1136  		return err
  1137  	}
  1138  
  1139  	if vdr.Weight == 0 {
  1140  		// The validator's weight was 0 before this block so they weren't in the
  1141  		// validator set.
  1142  		delete(vdrs, nodeID)
  1143  	}
  1144  	return nil
  1145  }
  1146  
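// Worked example (illustrative): suppose [vdrs] holds a validator with weight
// 300 at the current height, and the diff at this height records an increase
// of 300 (Decrease == false). Undoing the increase leaves weight 0, so the
// validator is deleted from [vdrs]: it was not in the set in the prior block.
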
  1147  func (s *state) ApplyValidatorPublicKeyDiffs(
  1148  	ctx context.Context,
  1149  	validators map[ids.NodeID]*validators.GetValidatorOutput,
  1150  	startHeight uint64,
  1151  	endHeight uint64,
  1152  ) error {
  1153  	diffIter := s.validatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix(
  1154  		marshalStartDiffKey(constants.PrimaryNetworkID, startHeight),
  1155  		constants.PrimaryNetworkID[:],
  1156  	)
  1157  	defer diffIter.Release()
  1158  
  1159  	for diffIter.Next() {
  1160  		if err := ctx.Err(); err != nil {
  1161  			return err
  1162  		}
  1163  
  1164  		_, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key())
  1165  		if err != nil {
  1166  			return err
  1167  		}
  1168  		// If the parsedHeight is less than our target endHeight, then we have
  1169  		// fully processed the diffs from startHeight through endHeight.
  1170  		if parsedHeight < endHeight {
  1171  			break
  1172  		}
  1173  
  1174  		vdr, ok := validators[nodeID]
  1175  		if !ok {
  1176  			continue
  1177  		}
  1178  
  1179  		pkBytes := diffIter.Value()
  1180  		if len(pkBytes) == 0 {
  1181  			vdr.PublicKey = nil
  1182  			continue
  1183  		}
  1184  
  1185  		vdr.PublicKey = bls.PublicKeyFromValidUncompressedBytes(pkBytes)
  1186  	}
  1187  
   1188  	// Note: this does not fall back to the linkeddb index because the linkeddb
  1189  	// index does not contain entries for when to remove the public key.
  1190  	//
  1191  	// Nodes may see inconsistent public keys for heights before the new public
  1192  	// key index was populated.
  1193  	return diffIter.Error()
  1194  }
  1195  
  1196  func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error {
  1197  	genesisBlkID := genesisBlk.ID()
  1198  	s.SetLastAccepted(genesisBlkID)
  1199  	s.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0))
  1200  	s.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply)
  1201  	s.AddStatelessBlock(genesisBlk)
  1202  
  1203  	// Persist UTXOs that exist at genesis
  1204  	for _, utxo := range genesis.UTXOs {
  1205  		avaxUTXO := utxo.UTXO
  1206  		s.AddUTXO(&avaxUTXO)
  1207  	}
  1208  
  1209  	// Persist primary network validator set at genesis
  1210  	for _, vdrTx := range genesis.Validators {
  1211  		// We expect genesis validator txs to be either AddValidatorTx or
  1212  		// AddPermissionlessValidatorTx.
  1213  		//
  1214  		// TODO: Enforce stricter type check
  1215  		validatorTx, ok := vdrTx.Unsigned.(txs.ScheduledStaker)
  1216  		if !ok {
  1217  			return fmt.Errorf("expected a scheduled staker but got %T", vdrTx.Unsigned)
  1218  		}
  1219  
  1220  		stakeAmount := validatorTx.Weight()
  1221  		// Note: We use [StartTime()] here because genesis transactions are
  1222  		// guaranteed to be pre-Durango activation.
  1223  		startTime := validatorTx.StartTime()
  1224  		stakeDuration := validatorTx.EndTime().Sub(startTime)
  1225  		currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID)
  1226  		if err != nil {
  1227  			return err
  1228  		}
  1229  
  1230  		potentialReward := s.rewards.Calculate(
  1231  			stakeDuration,
  1232  			stakeAmount,
  1233  			currentSupply,
  1234  		)
  1235  		newCurrentSupply, err := safemath.Add64(currentSupply, potentialReward)
  1236  		if err != nil {
  1237  			return err
  1238  		}
  1239  
  1240  		staker, err := NewCurrentStaker(vdrTx.ID(), validatorTx, startTime, potentialReward)
  1241  		if err != nil {
  1242  			return err
  1243  		}
  1244  
  1245  		s.PutCurrentValidator(staker)
  1246  		s.AddTx(vdrTx, status.Committed)
  1247  		s.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply)
  1248  	}
  1249  
  1250  	for _, chain := range genesis.Chains {
  1251  		unsignedChain, ok := chain.Unsigned.(*txs.CreateChainTx)
  1252  		if !ok {
  1253  			return fmt.Errorf("expected tx type *txs.CreateChainTx but got %T", chain.Unsigned)
  1254  		}
  1255  
  1256  		// Ensure all chains that the genesis bytes say to create have the right
  1257  		// network ID
  1258  		if unsignedChain.NetworkID != s.ctx.NetworkID {
  1259  			return avax.ErrWrongNetworkID
  1260  		}
  1261  
  1262  		s.AddChain(chain)
  1263  		s.AddTx(chain, status.Committed)
  1264  	}
  1265  
  1266  	// updateValidators is set to false here to maintain the invariant that the
  1267  	// primary network's validator set is empty before the validator sets are
  1268  	// initialized.
  1269  	return s.write(false /*=updateValidators*/, 0)
  1270  }
  1271  
  1272  // Load pulls data previously stored on disk that is expected to be in memory.
  1273  func (s *state) load() error {
  1274  	return errors.Join(
  1275  		s.loadMetadata(),
  1276  		s.loadCurrentValidators(),
  1277  		s.loadPendingValidators(),
  1278  		s.initValidatorSets(),
  1279  	)
  1280  }
  1281  
  1282  func (s *state) loadMetadata() error {
  1283  	timestamp, err := database.GetTimestamp(s.singletonDB, TimestampKey)
  1284  	if err != nil {
  1285  		return err
  1286  	}
  1287  	s.persistedTimestamp = timestamp
  1288  	s.SetTimestamp(timestamp)
  1289  
  1290  	currentSupply, err := database.GetUInt64(s.singletonDB, CurrentSupplyKey)
  1291  	if err != nil {
  1292  		return err
  1293  	}
  1294  	s.persistedCurrentSupply = currentSupply
  1295  	s.SetCurrentSupply(constants.PrimaryNetworkID, currentSupply)
  1296  
  1297  	lastAccepted, err := database.GetID(s.singletonDB, LastAcceptedKey)
  1298  	if err != nil {
  1299  		return err
  1300  	}
  1301  	s.persistedLastAccepted = lastAccepted
  1302  	s.lastAccepted = lastAccepted
  1303  
   1304  	// Look up the most recently indexed range on disk. If we haven't started
  1305  	// indexing the weights, then we keep the indexed heights as nil.
  1306  	indexedHeightsBytes, err := s.singletonDB.Get(HeightsIndexedKey)
  1307  	if err == database.ErrNotFound {
  1308  		return nil
  1309  	}
  1310  	if err != nil {
  1311  		return err
  1312  	}
  1313  
  1314  	indexedHeights := &heightRange{}
  1315  	_, err = block.GenesisCodec.Unmarshal(indexedHeightsBytes, indexedHeights)
  1316  	if err != nil {
  1317  		return err
  1318  	}
  1319  
  1320  	// If the indexed range is not up to date, then we will act as if the range
  1321  	// doesn't exist.
  1322  	lastAcceptedBlock, err := s.GetStatelessBlock(lastAccepted)
  1323  	if err != nil {
  1324  		return err
  1325  	}
  1326  	if indexedHeights.UpperBound != lastAcceptedBlock.Height() {
  1327  		return nil
  1328  	}
  1329  	s.indexedHeights = indexedHeights
  1330  	return nil
  1331  }
  1332  
  1333  func (s *state) loadCurrentValidators() error {
  1334  	s.currentStakers = newBaseStakers()
  1335  
  1336  	validatorIt := s.currentValidatorList.NewIterator()
  1337  	defer validatorIt.Release()
  1338  	for validatorIt.Next() {
  1339  		txIDBytes := validatorIt.Key()
  1340  		txID, err := ids.ToID(txIDBytes)
  1341  		if err != nil {
  1342  			return err
  1343  		}
  1344  		tx, _, err := s.GetTx(txID)
  1345  		if err != nil {
  1346  			return fmt.Errorf("failed loading validator transaction txID %s, %w", txID, err)
  1347  		}
  1348  
  1349  		stakerTx, ok := tx.Unsigned.(txs.Staker)
  1350  		if !ok {
  1351  			return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned)
  1352  		}
  1353  
  1354  		metadataBytes := validatorIt.Value()
  1355  		metadata := &validatorMetadata{
  1356  			txID: txID,
  1357  		}
  1358  		if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok {
  1359  			// Populate [StakerStartTime] using the tx as a default in the event
   1360  			// it was added pre-Durango and is not stored in the database.
  1361  			//
  1362  			// Note: We do not populate [LastUpdated] since it is expected to
  1363  			// always be present on disk.
  1364  			metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix())
  1365  		}
  1366  		if err := parseValidatorMetadata(metadataBytes, metadata); err != nil {
  1367  			return err
  1368  		}
  1369  
  1370  		staker, err := NewCurrentStaker(
  1371  			txID,
  1372  			stakerTx,
  1373  			time.Unix(int64(metadata.StakerStartTime), 0),
  1374  			metadata.PotentialReward)
  1375  		if err != nil {
  1376  			return err
  1377  		}
  1378  
  1379  		validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
  1380  		validator.validator = staker
  1381  
  1382  		s.currentStakers.stakers.ReplaceOrInsert(staker)
  1383  
  1384  		s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata)
  1385  	}
  1386  
  1387  	subnetValidatorIt := s.currentSubnetValidatorList.NewIterator()
  1388  	defer subnetValidatorIt.Release()
  1389  	for subnetValidatorIt.Next() {
  1390  		txIDBytes := subnetValidatorIt.Key()
  1391  		txID, err := ids.ToID(txIDBytes)
  1392  		if err != nil {
  1393  			return err
  1394  		}
  1395  		tx, _, err := s.GetTx(txID)
  1396  		if err != nil {
  1397  			return err
  1398  		}
  1399  
  1400  		stakerTx, ok := tx.Unsigned.(txs.Staker)
  1401  		if !ok {
  1402  			return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned)
  1403  		}
  1404  
  1405  		metadataBytes := subnetValidatorIt.Value()
  1406  		metadata := &validatorMetadata{
  1407  			txID: txID,
  1408  		}
  1409  		if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok {
  1410  			// Populate [StakerStartTime] and [LastUpdated] using the tx as a
  1411  			// default in the event they are not stored in the database.
  1412  			startTime := uint64(scheduledStakerTx.StartTime().Unix())
  1413  			metadata.StakerStartTime = startTime
  1414  			metadata.LastUpdated = startTime
  1415  		}
  1416  		if err := parseValidatorMetadata(metadataBytes, metadata); err != nil {
  1417  			return err
  1418  		}
  1419  
  1420  		staker, err := NewCurrentStaker(
  1421  			txID,
  1422  			stakerTx,
  1423  			time.Unix(int64(metadata.StakerStartTime), 0),
  1424  			metadata.PotentialReward,
  1425  		)
  1426  		if err != nil {
  1427  			return err
  1428  		}
  1429  		validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
  1430  		validator.validator = staker
  1431  
  1432  		s.currentStakers.stakers.ReplaceOrInsert(staker)
  1433  
  1434  		s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata)
  1435  	}
  1436  
  1437  	delegatorIt := s.currentDelegatorList.NewIterator()
  1438  	defer delegatorIt.Release()
  1439  
  1440  	subnetDelegatorIt := s.currentSubnetDelegatorList.NewIterator()
  1441  	defer subnetDelegatorIt.Release()
  1442  
  1443  	for _, delegatorIt := range []database.Iterator{delegatorIt, subnetDelegatorIt} {
  1444  		for delegatorIt.Next() {
  1445  			txIDBytes := delegatorIt.Key()
  1446  			txID, err := ids.ToID(txIDBytes)
  1447  			if err != nil {
  1448  				return err
  1449  			}
  1450  			tx, _, err := s.GetTx(txID)
  1451  			if err != nil {
  1452  				return err
  1453  			}
  1454  
  1455  			stakerTx, ok := tx.Unsigned.(txs.Staker)
  1456  			if !ok {
  1457  				return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned)
  1458  			}
  1459  
  1460  			metadataBytes := delegatorIt.Value()
  1461  			metadata := &delegatorMetadata{
  1462  				txID: txID,
  1463  			}
  1464  			if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok {
  1465  				// Populate [StakerStartTime] using the tx as a default in the
   1466  				// event it was added pre-Durango and is not stored in the
  1467  				// database.
  1468  				metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix())
  1469  			}
  1470  			err = parseDelegatorMetadata(metadataBytes, metadata)
  1471  			if err != nil {
  1472  				return err
  1473  			}
  1474  
  1475  			staker, err := NewCurrentStaker(
  1476  				txID,
  1477  				stakerTx,
  1478  				time.Unix(int64(metadata.StakerStartTime), 0),
  1479  				metadata.PotentialReward,
  1480  			)
  1481  			if err != nil {
  1482  				return err
  1483  			}
  1484  
  1485  			validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
  1486  			if validator.delegators == nil {
  1487  				validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less)
  1488  			}
  1489  			validator.delegators.ReplaceOrInsert(staker)
  1490  
  1491  			s.currentStakers.stakers.ReplaceOrInsert(staker)
  1492  		}
  1493  	}
  1494  
  1495  	return errors.Join(
  1496  		validatorIt.Error(),
  1497  		subnetValidatorIt.Error(),
  1498  		delegatorIt.Error(),
  1499  		subnetDelegatorIt.Error(),
  1500  	)
  1501  }
  1502  
  1503  func (s *state) loadPendingValidators() error {
  1504  	s.pendingStakers = newBaseStakers()
  1505  
  1506  	validatorIt := s.pendingValidatorList.NewIterator()
  1507  	defer validatorIt.Release()
  1508  
  1509  	subnetValidatorIt := s.pendingSubnetValidatorList.NewIterator()
  1510  	defer subnetValidatorIt.Release()
  1511  
  1512  	for _, validatorIt := range []database.Iterator{validatorIt, subnetValidatorIt} {
  1513  		for validatorIt.Next() {
  1514  			txIDBytes := validatorIt.Key()
  1515  			txID, err := ids.ToID(txIDBytes)
  1516  			if err != nil {
  1517  				return err
  1518  			}
  1519  			tx, _, err := s.GetTx(txID)
  1520  			if err != nil {
  1521  				return err
  1522  			}
  1523  
  1524  			stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker)
  1525  			if !ok {
  1526  				return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned)
  1527  			}
  1528  
  1529  			staker, err := NewPendingStaker(txID, stakerTx)
  1530  			if err != nil {
  1531  				return err
  1532  			}
  1533  
  1534  			validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
  1535  			validator.validator = staker
  1536  
  1537  			s.pendingStakers.stakers.ReplaceOrInsert(staker)
  1538  		}
  1539  	}
  1540  
  1541  	delegatorIt := s.pendingDelegatorList.NewIterator()
  1542  	defer delegatorIt.Release()
  1543  
  1544  	subnetDelegatorIt := s.pendingSubnetDelegatorList.NewIterator()
  1545  	defer subnetDelegatorIt.Release()
  1546  
  1547  	for _, delegatorIt := range []database.Iterator{delegatorIt, subnetDelegatorIt} {
  1548  		for delegatorIt.Next() {
  1549  			txIDBytes := delegatorIt.Key()
  1550  			txID, err := ids.ToID(txIDBytes)
  1551  			if err != nil {
  1552  				return err
  1553  			}
  1554  			tx, _, err := s.GetTx(txID)
  1555  			if err != nil {
  1556  				return err
  1557  			}
  1558  
  1559  			stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker)
  1560  			if !ok {
  1561  				return fmt.Errorf("expected tx type txs.ScheduledStaker but got %T", tx.Unsigned)
  1562  			}
  1563  
  1564  			staker, err := NewPendingStaker(txID, stakerTx)
  1565  			if err != nil {
  1566  				return err
  1567  			}
  1568  
  1569  			validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
  1570  			if validator.delegators == nil {
  1571  				validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less)
  1572  			}
  1573  			validator.delegators.ReplaceOrInsert(staker)
  1574  
  1575  			s.pendingStakers.stakers.ReplaceOrInsert(staker)
  1576  		}
  1577  	}
  1578  
  1579  	return errors.Join(
  1580  		validatorIt.Error(),
  1581  		subnetValidatorIt.Error(),
  1582  		delegatorIt.Error(),
  1583  		subnetDelegatorIt.Error(),
  1584  	)
  1585  }
  1586  
  1587  // Invariant: initValidatorSets requires loadCurrentValidators to have already
  1588  // been called.
  1589  func (s *state) initValidatorSets() error {
  1590  	for subnetID, validators := range s.currentStakers.validators {
  1591  		if s.validators.Count(subnetID) != 0 {
  1592  			// Enforce the invariant that the validator set is empty here.
  1593  			return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID)
  1594  		}
  1595  
  1596  		for nodeID, validator := range validators {
  1597  			validatorStaker := validator.validator
  1598  			if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil {
  1599  				return err
  1600  			}
  1601  
  1602  			delegatorIterator := NewTreeIterator(validator.delegators)
  1603  			for delegatorIterator.Next() {
  1604  				delegatorStaker := delegatorIterator.Value()
  1605  				if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil {
  1606  					delegatorIterator.Release()
  1607  					return err
  1608  				}
  1609  			}
  1610  			delegatorIterator.Release()
  1611  		}
  1612  	}
  1613  
  1614  	s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID))
  1615  	totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID)
  1616  	if err != nil {
  1617  		return fmt.Errorf("failed to get total weight of primary network validators: %w", err)
  1618  	}
  1619  	s.metrics.SetTotalStake(totalWeight)
  1620  	return nil
  1621  }
  1622  
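        // write flushes every pending in-memory change to the underlying
        // database. [updateValidators] controls whether the validator manager
        // is also updated to reflect the staker changes at [height]. The codec
        // version is selected based on whether the Durango upgrade is active
        // at the current chain time.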
  1623  func (s *state) write(updateValidators bool, height uint64) error {
  1624  	codecVersion := CodecVersion1
  1625  	if !s.cfg.UpgradeConfig.IsDurangoActivated(s.GetTimestamp()) {
  1626  		codecVersion = CodecVersion0
  1627  	}
  1628  
  1629  	return errors.Join(
  1630  		s.writeBlocks(),
  1631  		s.writeCurrentStakers(updateValidators, height, codecVersion),
  1632  		s.writePendingStakers(),
  1633  		s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers
  1634  		s.writeTXs(),
  1635  		s.writeRewardUTXOs(),
  1636  		s.writeUTXOs(),
  1637  		s.writeSubnets(),
  1638  		s.writeSubnetOwners(),
  1639  		s.writeTransformedSubnets(),
  1640  		s.writeSubnetSupplies(),
  1641  		s.writeChains(),
  1642  		s.writeMetadata(),
  1643  	)
  1644  }
  1645  
  1646  func (s *state) Close() error {
  1647  	return errors.Join(
  1648  		s.pendingSubnetValidatorBaseDB.Close(),
  1649  		s.pendingSubnetDelegatorBaseDB.Close(),
  1650  		s.pendingDelegatorBaseDB.Close(),
  1651  		s.pendingValidatorBaseDB.Close(),
  1652  		s.pendingValidatorsDB.Close(),
  1653  		s.currentSubnetValidatorBaseDB.Close(),
  1654  		s.currentSubnetDelegatorBaseDB.Close(),
  1655  		s.currentDelegatorBaseDB.Close(),
  1656  		s.currentValidatorBaseDB.Close(),
  1657  		s.currentValidatorsDB.Close(),
  1658  		s.validatorsDB.Close(),
  1659  		s.txDB.Close(),
  1660  		s.rewardUTXODB.Close(),
  1661  		s.utxoDB.Close(),
  1662  		s.subnetBaseDB.Close(),
  1663  		s.transformedSubnetDB.Close(),
  1664  		s.supplyDB.Close(),
  1665  		s.chainDB.Close(),
  1666  		s.singletonDB.Close(),
  1667  		s.blockDB.Close(),
  1668  		s.blockIDDB.Close(),
  1669  	)
  1670  }
  1671  
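        // sync initializes the database from the provided genesis bytes if it
        // is empty, and then loads the persisted state into memory.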
  1672  func (s *state) sync(genesis []byte) error {
  1673  	shouldInit, err := s.shouldInit()
  1674  	if err != nil {
  1675  		return fmt.Errorf(
  1676  			"failed to check if the database is initialized: %w",
  1677  			err,
  1678  		)
  1679  	}
  1680  
  1681  	// If the database is empty, create the platform chain anew using the
  1682  	// provided genesis state
  1683  	if shouldInit {
  1684  		if err := s.init(genesis); err != nil {
  1685  			return fmt.Errorf(
  1686  				"failed to initialize the database: %w",
  1687  				err,
  1688  			)
  1689  		}
  1690  	}
  1691  
  1692  	if err := s.load(); err != nil {
  1693  		return fmt.Errorf(
  1694  			"failed to load the database state: %w",
  1695  			err,
  1696  		)
  1697  	}
  1698  	return nil
  1699  }
  1700  
  1701  func (s *state) init(genesisBytes []byte) error {
  1702  	// Create the genesis block and save it as being accepted (We don't do
  1703  	// genesisBlock.Accept() because then it'd look for genesisBlock's
  1704  	// non-existent parent)
  1705  	genesisID := hashing.ComputeHash256Array(genesisBytes)
  1706  	genesisBlock, err := block.NewApricotCommitBlock(genesisID, 0 /*height*/)
  1707  	if err != nil {
  1708  		return err
  1709  	}
  1710  
  1711  	genesis, err := genesis.Parse(genesisBytes)
  1712  	if err != nil {
  1713  		return err
  1714  	}
  1715  	if err := s.syncGenesis(genesisBlock, genesis); err != nil {
  1716  		return err
  1717  	}
  1718  
  1719  	if err := s.doneInit(); err != nil {
  1720  		return err
  1721  	}
  1722  
  1723  	return s.Commit()
  1724  }
  1725  
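        // AddStatelessBlock buffers [block] in memory; it is persisted to disk
        // by writeBlocks on the next commit.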
  1726  func (s *state) AddStatelessBlock(block block.Block) {
  1727  	blkID := block.ID()
  1728  	s.addedBlockIDs[block.Height()] = blkID
  1729  	s.addedBlocks[blkID] = block
  1730  }
  1731  
  1732  func (s *state) SetHeight(height uint64) {
  1733  	if s.indexedHeights == nil {
  1734  		// If indexedHeights hasn't been created yet, then we are newly tracking
  1735  		// the range. This means we should initialize the LowerBound to the
  1736  		// current height.
  1737  		s.indexedHeights = &heightRange{
  1738  			LowerBound: height,
  1739  		}
  1740  	}
  1741  
  1742  	s.indexedHeights.UpperBound = height
  1743  	s.currentHeight = height
  1744  }
  1745  
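        // Commit atomically writes all pending state changes to disk. Abort is
        // deferred so the underlying versiondb is reset even if the write
        // fails.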
  1746  func (s *state) Commit() error {
  1747  	defer s.Abort()
  1748  	batch, err := s.CommitBatch()
  1749  	if err != nil {
  1750  		return err
  1751  	}
  1752  	return batch.Write()
  1753  }
  1754  
  1755  func (s *state) Abort() {
  1756  	s.baseDB.Abort()
  1757  }
  1758  
  1759  func (s *state) Checksum() ids.ID {
  1760  	return s.utxoState.Checksum()
  1761  }
  1762  
  1763  func (s *state) CommitBatch() (database.Batch, error) {
  1764  	// updateValidators is set to true here so that the validator manager is
  1765  	// kept up to date with the last accepted state.
  1766  	if err := s.write(true /*=updateValidators*/, s.currentHeight); err != nil {
  1767  		return nil, err
  1768  	}
  1769  	return s.baseDB.CommitBatch()
  1770  }
  1771  
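        // writeBlocks persists all buffered blocks along with their
        // height-to-blockID index entries, updating the corresponding caches
        // as it goes.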
  1772  func (s *state) writeBlocks() error {
  1773  	for blkID, blk := range s.addedBlocks {
  1774  		blkID := blkID
  1775  		blkBytes := blk.Bytes()
  1776  		blkHeight := blk.Height()
  1777  		heightKey := database.PackUInt64(blkHeight)
  1778  
  1779  		delete(s.addedBlockIDs, blkHeight)
  1780  		s.blockIDCache.Put(blkHeight, blkID)
  1781  		if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil {
  1782  			return fmt.Errorf("failed to add blockID: %w", err)
  1783  		}
  1784  
  1785  		delete(s.addedBlocks, blkID)
  1786  		// Note: Evict is used rather than Put here because blk may end up
  1787  		// referencing additional data (because of shared byte slices) that
  1788  		// would not be properly accounted for in the cache sizing.
  1789  		s.blockCache.Evict(blkID)
  1790  		if err := s.blockDB.Put(blkID[:], blkBytes); err != nil {
  1791  			return fmt.Errorf("failed to write block %s: %w", blkID, err)
  1792  		}
  1793  	}
  1794  	return nil
  1795  }
  1796  
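        // GetStatelessBlock returns the block with ID [blockID], checking the
        // in-memory buffer first, then the cache, and finally the database. A
        // nil cache entry is used to remember that a block is not present.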
  1797  func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) {
  1798  	if blk, exists := s.addedBlocks[blockID]; exists {
  1799  		return blk, nil
  1800  	}
  1801  	if blk, cached := s.blockCache.Get(blockID); cached {
  1802  		if blk == nil {
  1803  			return nil, database.ErrNotFound
  1804  		}
  1805  
  1806  		return blk, nil
  1807  	}
  1808  
  1809  	blkBytes, err := s.blockDB.Get(blockID[:])
  1810  	if err == database.ErrNotFound {
  1811  		s.blockCache.Put(blockID, nil)
  1812  		return nil, database.ErrNotFound
  1813  	}
  1814  	if err != nil {
  1815  		return nil, err
  1816  	}
  1817  
  1818  	blk, _, err := parseStoredBlock(blkBytes)
  1819  	if err != nil {
  1820  		return nil, err
  1821  	}
  1822  
  1823  	s.blockCache.Put(blockID, blk)
  1824  	return blk, nil
  1825  }
  1826  
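        // GetBlockIDAtHeight returns the ID of the accepted block at [height].
        // The lookup mirrors GetStatelessBlock: buffer, then cache, then
        // database, with ids.Empty cached as the not-found sentinel.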
  1827  func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) {
  1828  	if blkID, exists := s.addedBlockIDs[height]; exists {
  1829  		return blkID, nil
  1830  	}
  1831  	if blkID, cached := s.blockIDCache.Get(height); cached {
  1832  		if blkID == ids.Empty {
  1833  			return ids.Empty, database.ErrNotFound
  1834  		}
  1835  
  1836  		return blkID, nil
  1837  	}
  1838  
  1839  	heightKey := database.PackUInt64(height)
  1840  
  1841  	blkID, err := database.GetID(s.blockIDDB, heightKey)
  1842  	if err == database.ErrNotFound {
  1843  		s.blockIDCache.Put(height, ids.Empty)
  1844  		return ids.Empty, database.ErrNotFound
  1845  	}
  1846  	if err != nil {
  1847  		return ids.Empty, err
  1848  	}
  1849  
  1850  	s.blockIDCache.Put(height, blkID)
  1851  	return blkID, nil
  1852  }
  1853  
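        // writeCurrentStakers persists the staged changes to the current
        // staker set at [height]. For each modified validator it records a
        // weight diff and, on the Primary Network, a public key diff, updates
        // the serialized validator metadata, and, when [updateValidators] is
        // true, applies the change to the validator manager.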
  1854  func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error {
  1855  	for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs {
  1856  		delete(s.currentStakers.validatorDiffs, subnetID)
  1857  
  1858  		// Select db to write to
  1859  		validatorDB := s.currentSubnetValidatorList
  1860  		delegatorDB := s.currentSubnetDelegatorList
  1861  		if subnetID == constants.PrimaryNetworkID {
  1862  			validatorDB = s.currentValidatorList
  1863  			delegatorDB = s.currentDelegatorList
  1864  		}
  1865  
  1866  		// Record the change in weight and/or public key for each validator.
  1867  		for nodeID, validatorDiff := range validatorDiffs {
  1868  			// Copy [nodeID] so it doesn't get overwritten next iteration.
  1869  			nodeID := nodeID
  1870  
  1871  			weightDiff := &ValidatorWeightDiff{
  1872  				Decrease: validatorDiff.validatorStatus == deleted,
  1873  			}
  1874  			switch validatorDiff.validatorStatus {
  1875  			case added:
  1876  				staker := validatorDiff.validator
  1877  				weightDiff.Amount = staker.Weight
  1878  
  1879  				// Invariant: Only the Primary Network contains non-nil public
  1880  				// keys.
  1881  				if staker.PublicKey != nil {
  1882  					// Record that the public key for the validator is being
  1883  					// added. This means the prior value for the public key was
  1884  					// nil.
  1885  					err := s.validatorPublicKeyDiffsDB.Put(
  1886  						marshalDiffKey(constants.PrimaryNetworkID, height, nodeID),
  1887  						nil,
  1888  					)
  1889  					if err != nil {
  1890  						return err
  1891  					}
  1892  				}
  1893  
  1894  				// The validator is being added.
  1895  				//
  1896  				// Invariant: It's impossible for a delegator to have been
  1897  				// rewarded in the same block that the validator was added.
  1898  				startTime := uint64(staker.StartTime.Unix())
  1899  				metadata := &validatorMetadata{
  1900  					txID:        staker.TxID,
  1901  					lastUpdated: staker.StartTime,
  1902  
  1903  					UpDuration:               0,
  1904  					LastUpdated:              startTime,
  1905  					StakerStartTime:          startTime,
  1906  					PotentialReward:          staker.PotentialReward,
  1907  					PotentialDelegateeReward: 0,
  1908  				}
  1909  
  1910  				metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata)
  1911  				if err != nil {
  1912  					return fmt.Errorf("failed to serialize current validator: %w", err)
  1913  				}
  1914  
  1915  				if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil {
  1916  					return fmt.Errorf("failed to write current validator to list: %w", err)
  1917  				}
  1918  
  1919  				s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata)
  1920  			case deleted:
  1921  				staker := validatorDiff.validator
  1922  				weightDiff.Amount = staker.Weight
  1923  
  1924  				// Invariant: Only the Primary Network contains non-nil public
  1925  				// keys.
  1926  				if staker.PublicKey != nil {
  1927  					// Record that the public key for the validator is being
  1928  					// removed. This means we must record the prior value of the
  1929  					// public key.
  1930  					//
  1931  					// Note: We store the uncompressed public key here as it is
  1932  					// significantly more efficient to parse when applying
  1933  					// diffs.
  1934  					err := s.validatorPublicKeyDiffsDB.Put(
  1935  						marshalDiffKey(constants.PrimaryNetworkID, height, nodeID),
  1936  						bls.PublicKeyToUncompressedBytes(staker.PublicKey),
  1937  					)
  1938  					if err != nil {
  1939  						return err
  1940  					}
  1941  				}
  1942  
  1943  				if err := validatorDB.Delete(staker.TxID[:]); err != nil {
  1944  					return fmt.Errorf("failed to delete current validator: %w", err)
  1945  				}
  1946  
  1947  				s.validatorState.DeleteValidatorMetadata(nodeID, subnetID)
  1948  			}
  1949  
  1950  			err := writeCurrentDelegatorDiff(
  1951  				delegatorDB,
  1952  				weightDiff,
  1953  				validatorDiff,
  1954  				codecVersion,
  1955  			)
  1956  			if err != nil {
  1957  				return err
  1958  			}
  1959  
  1960  			if weightDiff.Amount == 0 {
  1961  				// No weight change to record; go to next validator.
  1962  				continue
  1963  			}
  1964  
  1965  			err = s.validatorWeightDiffsDB.Put(
  1966  				marshalDiffKey(subnetID, height, nodeID),
  1967  				marshalWeightDiff(weightDiff),
  1968  			)
  1969  			if err != nil {
  1970  				return err
  1971  			}
  1972  
  1973  			// TODO: Move the validator set management out of the state package
  1974  			if !updateValidators {
  1975  				continue
  1976  			}
  1977  
  1978  			if weightDiff.Decrease {
  1979  				err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount)
  1980  			} else {
  1981  				if validatorDiff.validatorStatus == added {
  1982  					staker := validatorDiff.validator
  1983  					err = s.validators.AddStaker(
  1984  						subnetID,
  1985  						nodeID,
  1986  						staker.PublicKey,
  1987  						staker.TxID,
  1988  						weightDiff.Amount,
  1989  					)
  1990  				} else {
  1991  					err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount)
  1992  				}
  1993  			}
  1994  			if err != nil {
  1995  				return fmt.Errorf("failed to update validator weight: %w", err)
  1996  			}
  1997  		}
  1998  	}
  1999  
  2000  	// TODO: Move validator set management out of the state package
  2001  	//
  2002  	// Attempt to update the stake metrics
  2003  	if !updateValidators {
  2004  		return nil
  2005  	}
  2006  
  2007  	totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID)
  2008  	if err != nil {
  2009  		return fmt.Errorf("failed to get total weight of primary network: %w", err)
  2010  	}
  2011  
  2012  	s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID))
  2013  	s.metrics.SetTotalStake(totalWeight)
  2014  	return nil
  2015  }
  2016  
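        // writeCurrentDelegatorDiff persists the added and deleted delegators
        // of a single validator, accumulating their stake into [weightDiff] so
        // the caller can record the validator's net weight change.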
  2017  func writeCurrentDelegatorDiff(
  2018  	currentDelegatorList linkeddb.LinkedDB,
  2019  	weightDiff *ValidatorWeightDiff,
  2020  	validatorDiff *diffValidator,
  2021  	codecVersion uint16,
  2022  ) error {
  2023  	addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators)
  2024  	defer addedDelegatorIterator.Release()
  2025  	for addedDelegatorIterator.Next() {
  2026  		staker := addedDelegatorIterator.Value()
  2027  
  2028  		if err := weightDiff.Add(false, staker.Weight); err != nil {
  2029  			return fmt.Errorf("failed to increase node weight diff: %w", err)
  2030  		}
  2031  
  2032  		metadata := &delegatorMetadata{
  2033  			txID:            staker.TxID,
  2034  			PotentialReward: staker.PotentialReward,
  2035  			StakerStartTime: uint64(staker.StartTime.Unix()),
  2036  		}
  2037  		if err := writeDelegatorMetadata(currentDelegatorList, metadata, codecVersion); err != nil {
  2038  			return fmt.Errorf("failed to write current delegator to list: %w", err)
  2039  		}
  2040  	}
  2041  
  2042  	for _, staker := range validatorDiff.deletedDelegators {
  2043  		if err := weightDiff.Add(true, staker.Weight); err != nil {
  2044  			return fmt.Errorf("failed to decrease node weight diff: %w", err)
  2045  		}
  2046  
  2047  		if err := currentDelegatorList.Delete(staker.TxID[:]); err != nil {
  2048  			return fmt.Errorf("failed to delete current delegator: %w", err)
  2049  		}
  2050  	}
  2051  	return nil
  2052  }
  2053  
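        // writePendingStakers persists the staged changes to the pending
        // staker set. Pending stakers carry no metadata, so only list
        // membership is updated.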
  2054  func (s *state) writePendingStakers() error {
  2055  	for subnetID, subnetValidatorDiffs := range s.pendingStakers.validatorDiffs {
  2056  		delete(s.pendingStakers.validatorDiffs, subnetID)
  2057  
  2058  		validatorDB := s.pendingSubnetValidatorList
  2059  		delegatorDB := s.pendingSubnetDelegatorList
  2060  		if subnetID == constants.PrimaryNetworkID {
  2061  			validatorDB = s.pendingValidatorList
  2062  			delegatorDB = s.pendingDelegatorList
  2063  		}
  2064  
  2065  		for _, validatorDiff := range subnetValidatorDiffs {
  2066  			err := writePendingDiff(
  2067  				validatorDB,
  2068  				delegatorDB,
  2069  				validatorDiff,
  2070  			)
  2071  			if err != nil {
  2072  				return err
  2073  			}
  2074  		}
  2075  	}
  2076  	return nil
  2077  }
  2078  
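        // writePendingDiff applies a single validator's pending-set changes to
        // the pending validator and delegator lists.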
  2079  func writePendingDiff(
  2080  	pendingValidatorList linkeddb.LinkedDB,
  2081  	pendingDelegatorList linkeddb.LinkedDB,
  2082  	validatorDiff *diffValidator,
  2083  ) error {
  2084  	switch validatorDiff.validatorStatus {
  2085  	case added:
  2086  		err := pendingValidatorList.Put(validatorDiff.validator.TxID[:], nil)
  2087  		if err != nil {
  2088  			return fmt.Errorf("failed to add pending validator: %w", err)
  2089  		}
  2090  	case deleted:
  2091  		err := pendingValidatorList.Delete(validatorDiff.validator.TxID[:])
  2092  		if err != nil {
  2093  			return fmt.Errorf("failed to delete pending validator: %w", err)
  2094  		}
  2095  	}
  2096  
  2097  	addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators)
  2098  	defer addedDelegatorIterator.Release()
  2099  	for addedDelegatorIterator.Next() {
  2100  		staker := addedDelegatorIterator.Value()
  2101  
  2102  		if err := pendingDelegatorList.Put(staker.TxID[:], nil); err != nil {
  2103  			return fmt.Errorf("failed to write pending delegator to list: %w", err)
  2104  		}
  2105  	}
  2106  
  2107  	for _, staker := range validatorDiff.deletedDelegators {
  2108  		if err := pendingDelegatorList.Delete(staker.TxID[:]); err != nil {
  2109  			return fmt.Errorf("failed to delete pending delegator: %w", err)
  2110  		}
  2111  	}
  2112  	return nil
  2113  }
  2114  
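        // writeTXs persists all buffered transactions together with their
        // statuses, evicting each from the cache as it is written.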
  2115  func (s *state) writeTXs() error {
  2116  	for txID, txStatus := range s.addedTxs {
  2117  		txID := txID
  2118  
  2119  		stx := txBytesAndStatus{
  2120  			Tx:     txStatus.tx.Bytes(),
  2121  			Status: txStatus.status,
  2122  		}
  2123  
  2124  		// Note that we're serializing a [txBytesAndStatus] here, not a
  2125  		// *txs.Tx, so we don't use [txs.Codec].
  2126  		txBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, &stx)
  2127  		if err != nil {
  2128  			return fmt.Errorf("failed to serialize tx: %w", err)
  2129  		}
  2130  
  2131  		delete(s.addedTxs, txID)
  2132  		// Note: Evict is used rather than Put here because stx may end up
  2133  		// referencing additional data (because of shared byte slices) that
  2134  		// would not be properly accounted for in the cache sizing.
  2135  		s.txCache.Evict(txID)
  2136  		if err := s.txDB.Put(txID[:], txBytes); err != nil {
  2137  			return fmt.Errorf("failed to add tx: %w", err)
  2138  		}
  2139  	}
  2140  	return nil
  2141  }
  2142  
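        // writeRewardUTXOs persists the reward UTXOs produced by each
        // transaction under a per-transaction prefixed database.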
  2143  func (s *state) writeRewardUTXOs() error {
  2144  	for txID, utxos := range s.addedRewardUTXOs {
  2145  		delete(s.addedRewardUTXOs, txID)
  2146  		s.rewardUTXOsCache.Put(txID, utxos)
  2147  		rawTxDB := prefixdb.New(txID[:], s.rewardUTXODB)
  2148  		txDB := linkeddb.NewDefault(rawTxDB)
  2149  
  2150  		for _, utxo := range utxos {
  2151  			utxoBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, utxo)
  2152  			if err != nil {
  2153  				return fmt.Errorf("failed to serialize reward UTXO: %w", err)
  2154  			}
  2155  			utxoID := utxo.InputID()
  2156  			if err := txDB.Put(utxoID[:], utxoBytes); err != nil {
  2157  				return fmt.Errorf("failed to add reward UTXO: %w", err)
  2158  			}
  2159  		}
  2160  	}
  2161  	return nil
  2162  }
  2163  
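        // writeUTXOs applies all buffered UTXO modifications; a nil entry
        // marks a UTXO that was consumed and must be deleted.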
  2164  func (s *state) writeUTXOs() error {
  2165  	for utxoID, utxo := range s.modifiedUTXOs {
  2166  		delete(s.modifiedUTXOs, utxoID)
  2167  
  2168  		if utxo == nil {
  2169  			if err := s.utxoState.DeleteUTXO(utxoID); err != nil {
  2170  				return fmt.Errorf("failed to delete UTXO: %w", err)
  2171  			}
  2172  			continue
  2173  		}
  2174  		if err := s.utxoState.PutUTXO(utxo); err != nil {
  2175  			return fmt.Errorf("failed to add UTXO: %w", err)
  2176  		}
  2177  	}
  2178  	return nil
  2179  }
  2180  
  2181  func (s *state) writeSubnets() error {
  2182  	for _, subnetID := range s.addedSubnetIDs {
  2183  		if err := s.subnetDB.Put(subnetID[:], nil); err != nil {
  2184  			return fmt.Errorf("failed to write subnet: %w", err)
  2185  		}
  2186  	}
  2187  	s.addedSubnetIDs = nil
  2188  	return nil
  2189  }
  2190  
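        // writeSubnetOwners persists the modified subnet owners, caching each
        // owner along with its serialized size.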
  2191  func (s *state) writeSubnetOwners() error {
  2192  	for subnetID, owner := range s.subnetOwners {
  2193  		subnetID := subnetID
  2194  		owner := owner
  2195  		delete(s.subnetOwners, subnetID)
  2196  
  2197  		ownerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &owner)
  2198  		if err != nil {
  2199  			return fmt.Errorf("failed to marshal subnet owner: %w", err)
  2200  		}
  2201  
  2202  		s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{
  2203  			owner: owner,
  2204  			size:  len(ownerBytes),
  2205  		})
  2206  
  2207  		if err := s.subnetOwnerDB.Put(subnetID[:], ownerBytes); err != nil {
  2208  			return fmt.Errorf("failed to write subnet owner: %w", err)
  2209  		}
  2210  	}
  2211  	return nil
  2212  }
  2213  
  2214  func (s *state) writeTransformedSubnets() error {
  2215  	for subnetID, tx := range s.transformedSubnets {
  2216  		txID := tx.ID()
  2217  
  2218  		delete(s.transformedSubnets, subnetID)
  2219  		// Note: Evict is used rather than Put here because tx may end up
  2220  		// referencing additional data (because of shared byte slices) that
  2221  		// would not be properly accounted for in the cache sizing.
  2222  		s.transformedSubnetCache.Evict(subnetID)
  2223  		if err := database.PutID(s.transformedSubnetDB, subnetID[:], txID); err != nil {
  2224  			return fmt.Errorf("failed to write transformed subnet: %w", err)
  2225  		}
  2226  	}
  2227  	return nil
  2228  }
  2229  
  2230  func (s *state) writeSubnetSupplies() error {
  2231  	for subnetID, supply := range s.modifiedSupplies {
  2232  		supply := supply
  2233  		delete(s.modifiedSupplies, subnetID)
  2234  		s.supplyCache.Put(subnetID, &supply)
  2235  		if err := database.PutUInt64(s.supplyDB, subnetID[:], supply); err != nil {
  2236  			return fmt.Errorf("failed to write subnet supply: %w", err)
  2237  		}
  2238  	}
  2239  	return nil
  2240  }
  2241  
  2242  func (s *state) writeChains() error {
  2243  	for subnetID, chains := range s.addedChains {
  2244  		for _, chain := range chains {
  2245  			chainDB := s.getChainDB(subnetID)
  2246  
  2247  			chainID := chain.ID()
  2248  			if err := chainDB.Put(chainID[:], nil); err != nil {
  2249  				return fmt.Errorf("failed to write chain: %w", err)
  2250  			}
  2251  		}
  2252  		delete(s.addedChains, subnetID)
  2253  	}
  2254  	return nil
  2255  }
  2256  
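        // writeMetadata persists the singleton chain metadata (timestamp,
        // current supply, and last accepted block), skipping values that have
        // not changed since they were last written, and rewrites the indexed
        // height range whenever it is set.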
  2257  func (s *state) writeMetadata() error {
  2258  	if !s.persistedTimestamp.Equal(s.timestamp) {
  2259  		if err := database.PutTimestamp(s.singletonDB, TimestampKey, s.timestamp); err != nil {
  2260  			return fmt.Errorf("failed to write timestamp: %w", err)
  2261  		}
  2262  		s.persistedTimestamp = s.timestamp
  2263  	}
  2264  	if s.persistedCurrentSupply != s.currentSupply {
  2265  		if err := database.PutUInt64(s.singletonDB, CurrentSupplyKey, s.currentSupply); err != nil {
  2266  			return fmt.Errorf("failed to write current supply: %w", err)
  2267  		}
  2268  		s.persistedCurrentSupply = s.currentSupply
  2269  	}
  2270  	if s.persistedLastAccepted != s.lastAccepted {
  2271  		if err := database.PutID(s.singletonDB, LastAcceptedKey, s.lastAccepted); err != nil {
  2272  			return fmt.Errorf("failed to write last accepted: %w", err)
  2273  		}
  2274  		s.persistedLastAccepted = s.lastAccepted
  2275  	}
  2276  	if s.indexedHeights != nil {
  2277  		indexedHeightsBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, s.indexedHeights)
  2278  		if err != nil {
  2279  			return err
  2280  		}
  2281  		if err := s.singletonDB.Put(HeightsIndexedKey, indexedHeightsBytes); err != nil {
  2282  			return fmt.Errorf("failed to write indexed range: %w", err)
  2283  		}
  2284  	}
  2285  	return nil
  2286  }
  2287  
  2288  // parseStoredBlock returns the block and whether it was stored as a [stateBlk].
  2289  // Invariant: blkBytes is safe to parse with block.GenesisCodec
  2290  //
  2291  // TODO: Remove after v1.12.x is activated
  2292  func parseStoredBlock(blkBytes []byte) (block.Block, bool, error) {
  2293  	// Attempt to parse as a block.Block
  2294  	blk, err := block.Parse(block.GenesisCodec, blkBytes)
  2295  	if err == nil {
  2296  		return blk, false, nil
  2297  	}
  2298  
  2299  	// Fallback to [stateBlk]
  2300  	blkState := stateBlk{}
  2301  	if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil {
  2302  		return nil, false, err
  2303  	}
  2304  
  2305  	blk, err = block.Parse(block.GenesisCodec, blkState.Bytes)
  2306  	return blk, true, err
  2307  }
  2308  
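        // ReindexBlocks rewrites any blocks stored in the legacy [stateBlk]
        // format so that only the raw block bytes are stored. The iteration
        // periodically commits, releases the iterator, and sleeps to avoid
        // starving block processing; [lock] must be held for each commit.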
  2309  func (s *state) ReindexBlocks(lock sync.Locker, log logging.Logger) error {
  2310  	has, err := s.singletonDB.Has(BlocksReindexedKey)
  2311  	if err != nil {
  2312  		return err
  2313  	}
  2314  	if has {
  2315  		log.Info("blocks already reindexed")
  2316  		return nil
  2317  	}
  2318  
  2319  	// It is possible that new blocks are added after grabbing this iterator.
  2320  	// New blocks are guaranteed to be persisted in the new format, so we don't
  2321  	// need to check them.
  2322  	blockIterator := s.blockDB.NewIterator()
  2323  	// Releasing is done inside a closure to ensure that the deferred call
  2324  	// releases the most recent iterator, since blockIterator is reassigned
  2325  	// after each commit below.
  2326  	defer func() {
  2327  		blockIterator.Release()
  2328  	}()
  2329  
  2330  	log.Info("starting block reindexing")
  2331  
  2332  	var (
  2333  		startTime         = time.Now()
  2334  		lastCommit        = startTime
  2335  		nextUpdate        = startTime.Add(indexLogFrequency)
  2336  		numIndicesChecked = 0
  2337  		numIndicesUpdated = 0
  2338  	)
  2339  
  2340  	for blockIterator.Next() {
  2341  		valueBytes := blockIterator.Value()
  2342  		blk, isStateBlk, err := parseStoredBlock(valueBytes)
  2343  		if err != nil {
  2344  			return fmt.Errorf("failed to parse block: %w", err)
  2345  		}
  2346  
  2347  		blkID := blk.ID()
  2348  
  2349  		// This block was previously stored using the legacy format; update the
  2350  		// index to remove the usage of stateBlk.
  2351  		if isStateBlk {
  2352  			blkBytes := blk.Bytes()
  2353  			if err := s.blockDB.Put(blkID[:], blkBytes); err != nil {
  2354  				return fmt.Errorf("failed to write block: %w", err)
  2355  			}
  2356  
  2357  			numIndicesUpdated++
  2358  		}
  2359  
  2360  		numIndicesChecked++
  2361  
  2362  		now := time.Now()
  2363  		if now.After(nextUpdate) {
  2364  			nextUpdate = now.Add(indexLogFrequency)
  2365  
  2366  			progress := timer.ProgressFromHash(blkID[:])
  2367  			eta := timer.EstimateETA(
  2368  				startTime,
  2369  				progress,
  2370  				math.MaxUint64,
  2371  			)
  2372  
  2373  			log.Info("reindexing blocks",
  2374  				zap.Int("numIndicesUpdated", numIndicesUpdated),
  2375  				zap.Int("numIndicesChecked", numIndicesChecked),
  2376  				zap.Duration("eta", eta),
  2377  			)
  2378  		}
  2379  
  2380  		if numIndicesChecked%indexIterationLimit == 0 {
  2381  			// We must hold the lock during committing to make sure we don't
  2382  			// attempt to commit to disk while a block is concurrently being
  2383  			// accepted.
  2384  			lock.Lock()
  2385  			err := errors.Join(
  2386  				s.Commit(),
  2387  				blockIterator.Error(),
  2388  			)
  2389  			lock.Unlock()
  2390  			if err != nil {
  2391  				return err
  2392  			}
  2393  
  2394  			// We release the iterator here to allow the underlying database to
  2395  			// clean up deleted state.
  2396  			blockIterator.Release()
  2397  
  2398  			// We take the minimum here because the node may currently be
  2399  			// bootstrapping, in which case grabbing the lock could take an
  2400  			// extremely long time; we should not delay block processing for
  2401  			// that long.
  2402  			indexDuration := now.Sub(lastCommit)
  2403  			sleepDuration := min(
  2404  				indexIterationSleepMultiplier*indexDuration,
  2405  				indexIterationSleepCap,
  2406  			)
  2407  			time.Sleep(sleepDuration)
  2408  
  2409  			// Make sure not to include the sleep duration in the next index
  2410  			// duration.
  2411  			lastCommit = time.Now()
  2412  
  2413  			blockIterator = s.blockDB.NewIteratorWithStart(blkID[:])
  2414  		}
  2415  	}
  2416  
  2417  	// Ensure we fully iterated over all blocks before recording that indexing
  2418  	// has finished.
  2419  	//
  2420  	// Note: This is needed because a transient read error could cause the
  2421  	// iterator to stop early.
  2422  	if err := blockIterator.Error(); err != nil {
  2423  		return fmt.Errorf("failed to iterate over historical blocks: %w", err)
  2424  	}
  2425  
  2426  	if err := s.singletonDB.Put(BlocksReindexedKey, nil); err != nil {
  2427  		return fmt.Errorf("failed to mark blocks as reindexed: %w", err)
  2428  	}
  2429  
  2430  	// We must hold the lock during committing to make sure we don't attempt to
  2431  	// commit to disk while a block is concurrently being accepted.
  2432  	lock.Lock()
  2433  	defer lock.Unlock()
  2434  
  2435  	log.Info("finished block reindexing",
  2436  		zap.Int("numIndicesUpdated", numIndicesUpdated),
  2437  		zap.Int("numIndicesChecked", numIndicesChecked),
  2438  		zap.Duration("duration", time.Since(startTime)),
  2439  	)
  2440  
  2441  	return s.Commit()
  2442  }