github.com/ava-labs/avalanchego@v1.11.11/vms/platformvm/state/state.go (about) 1 // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 2 // See the file LICENSE for licensing terms. 3 4 package state 5 6 import ( 7 "context" 8 "errors" 9 "fmt" 10 "math" 11 "sync" 12 "time" 13 14 "github.com/google/btree" 15 "github.com/prometheus/client_golang/prometheus" 16 "go.uber.org/zap" 17 18 "github.com/ava-labs/avalanchego/cache" 19 "github.com/ava-labs/avalanchego/cache/metercacher" 20 "github.com/ava-labs/avalanchego/database" 21 "github.com/ava-labs/avalanchego/database/linkeddb" 22 "github.com/ava-labs/avalanchego/database/prefixdb" 23 "github.com/ava-labs/avalanchego/database/versiondb" 24 "github.com/ava-labs/avalanchego/ids" 25 "github.com/ava-labs/avalanchego/snow" 26 "github.com/ava-labs/avalanchego/snow/choices" 27 "github.com/ava-labs/avalanchego/snow/uptime" 28 "github.com/ava-labs/avalanchego/snow/validators" 29 "github.com/ava-labs/avalanchego/upgrade" 30 "github.com/ava-labs/avalanchego/utils/constants" 31 "github.com/ava-labs/avalanchego/utils/crypto/bls" 32 "github.com/ava-labs/avalanchego/utils/hashing" 33 "github.com/ava-labs/avalanchego/utils/iterator" 34 "github.com/ava-labs/avalanchego/utils/logging" 35 "github.com/ava-labs/avalanchego/utils/timer" 36 "github.com/ava-labs/avalanchego/utils/wrappers" 37 "github.com/ava-labs/avalanchego/vms/components/avax" 38 "github.com/ava-labs/avalanchego/vms/components/gas" 39 "github.com/ava-labs/avalanchego/vms/platformvm/block" 40 "github.com/ava-labs/avalanchego/vms/platformvm/config" 41 "github.com/ava-labs/avalanchego/vms/platformvm/fx" 42 "github.com/ava-labs/avalanchego/vms/platformvm/genesis" 43 "github.com/ava-labs/avalanchego/vms/platformvm/metrics" 44 "github.com/ava-labs/avalanchego/vms/platformvm/reward" 45 "github.com/ava-labs/avalanchego/vms/platformvm/status" 46 "github.com/ava-labs/avalanchego/vms/platformvm/txs" 47 48 safemath "github.com/ava-labs/avalanchego/utils/math" 49 ) 50 51 const ( 52 defaultTreeDegree = 2 53 indexIterationLimit = 4096 54 indexIterationSleepMultiplier = 5 55 indexIterationSleepCap = 10 * time.Second 56 indexLogFrequency = 30 * time.Second 57 ) 58 59 var ( 60 _ State = (*state)(nil) 61 62 errValidatorSetAlreadyPopulated = errors.New("validator set already populated") 63 errIsNotSubnet = errors.New("is not a subnet") 64 65 BlockIDPrefix = []byte("blockID") 66 BlockPrefix = []byte("block") 67 ValidatorsPrefix = []byte("validators") 68 CurrentPrefix = []byte("current") 69 PendingPrefix = []byte("pending") 70 ValidatorPrefix = []byte("validator") 71 DelegatorPrefix = []byte("delegator") 72 SubnetValidatorPrefix = []byte("subnetValidator") 73 SubnetDelegatorPrefix = []byte("subnetDelegator") 74 ValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") 75 ValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") 76 TxPrefix = []byte("tx") 77 RewardUTXOsPrefix = []byte("rewardUTXOs") 78 UTXOPrefix = []byte("utxo") 79 SubnetPrefix = []byte("subnet") 80 SubnetOwnerPrefix = []byte("subnetOwner") 81 SubnetManagerPrefix = []byte("subnetManager") 82 TransformedSubnetPrefix = []byte("transformedSubnet") 83 SupplyPrefix = []byte("supply") 84 ChainPrefix = []byte("chain") 85 SingletonPrefix = []byte("singleton") 86 87 TimestampKey = []byte("timestamp") 88 FeeStateKey = []byte("fee state") 89 CurrentSupplyKey = []byte("current supply") 90 LastAcceptedKey = []byte("last accepted") 91 HeightsIndexedKey = []byte("heights indexed") 92 InitializedKey = []byte("initialized") 93 BlocksReindexedKey = []byte("blocks reindexed") 94 ) 95 96 // Chain collects all methods to manage the state of the chain for block 97 // execution. 98 type Chain interface { 99 Stakers 100 avax.UTXOAdder 101 avax.UTXOGetter 102 avax.UTXODeleter 103 104 GetTimestamp() time.Time 105 SetTimestamp(tm time.Time) 106 107 GetFeeState() gas.State 108 SetFeeState(f gas.State) 109 110 GetCurrentSupply(subnetID ids.ID) (uint64, error) 111 SetCurrentSupply(subnetID ids.ID, cs uint64) 112 113 AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) 114 115 AddSubnet(subnetID ids.ID) 116 117 GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) 118 SetSubnetOwner(subnetID ids.ID, owner fx.Owner) 119 120 GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) 121 SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) 122 123 GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) 124 AddSubnetTransformation(transformSubnetTx *txs.Tx) 125 126 AddChain(createChainTx *txs.Tx) 127 128 GetTx(txID ids.ID) (*txs.Tx, status.Status, error) 129 AddTx(tx *txs.Tx, status status.Status) 130 } 131 132 type State interface { 133 Chain 134 uptime.State 135 avax.UTXOReader 136 137 GetLastAccepted() ids.ID 138 SetLastAccepted(blkID ids.ID) 139 140 GetStatelessBlock(blockID ids.ID) (block.Block, error) 141 142 // Invariant: [block] is an accepted block. 143 AddStatelessBlock(block block.Block) 144 145 GetBlockIDAtHeight(height uint64) (ids.ID, error) 146 147 GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) 148 GetSubnetIDs() ([]ids.ID, error) 149 GetChains(subnetID ids.ID) ([]*txs.Tx, error) 150 151 // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis 152 // block until it has applied all of the diffs up to and including 153 // [endHeight]. Applying the diffs modifies [validators]. 154 // 155 // Invariant: If attempting to generate the validator set for 156 // [endHeight - 1], [validators] must initially contain the validator 157 // weights for [startHeight]. 158 // 159 // Note: Because this function iterates towards the genesis, [startHeight] 160 // will typically be greater than or equal to [endHeight]. If [startHeight] 161 // is less than [endHeight], no diffs will be applied. 162 ApplyValidatorWeightDiffs( 163 ctx context.Context, 164 validators map[ids.NodeID]*validators.GetValidatorOutput, 165 startHeight uint64, 166 endHeight uint64, 167 subnetID ids.ID, 168 ) error 169 170 // ApplyValidatorPublicKeyDiffs iterates from [startHeight] towards the 171 // genesis block until it has applied all of the diffs up to and including 172 // [endHeight]. Applying the diffs modifies [validators]. 173 // 174 // Invariant: If attempting to generate the validator set for 175 // [endHeight - 1], [validators] must initially contain the validator 176 // weights for [startHeight]. 177 // 178 // Note: Because this function iterates towards the genesis, [startHeight] 179 // will typically be greater than or equal to [endHeight]. If [startHeight] 180 // is less than [endHeight], no diffs will be applied. 181 ApplyValidatorPublicKeyDiffs( 182 ctx context.Context, 183 validators map[ids.NodeID]*validators.GetValidatorOutput, 184 startHeight uint64, 185 endHeight uint64, 186 ) error 187 188 SetHeight(height uint64) 189 190 // Discard uncommitted changes to the database. 191 Abort() 192 193 // ReindexBlocks converts any block indices using the legacy storage format 194 // to the new format. If this database has already updated the indices, 195 // this function will return immediately, without iterating over the 196 // database. 197 // 198 // TODO: Remove after v1.12.x is activated 199 ReindexBlocks(lock sync.Locker, log logging.Logger) error 200 201 // Commit changes to the base database. 202 Commit() error 203 204 // Returns a batch of unwritten changes that, when written, will commit all 205 // pending changes to the base database. 206 CommitBatch() (database.Batch, error) 207 208 Checksum() ids.ID 209 210 Close() error 211 } 212 213 // Prior to https://github.com/ava-labs/avalanchego/pull/1719, blocks were 214 // stored as a map from blkID to stateBlk. Nodes synced prior to this PR may 215 // still have blocks partially stored using this legacy format. 216 // 217 // TODO: Remove after v1.12.x is activated 218 type stateBlk struct { 219 Bytes []byte `serialize:"true"` 220 Status choices.Status `serialize:"true"` 221 } 222 223 /* 224 * VMDB 225 * |-. validators 226 * | |-. current 227 * | | |-. validator 228 * | | | '-. list 229 * | | | '-- txID -> uptime + potential reward + potential delegatee reward 230 * | | |-. delegator 231 * | | | '-. list 232 * | | | '-- txID -> potential reward 233 * | | |-. subnetValidator 234 * | | | '-. list 235 * | | | '-- txID -> uptime + potential reward + potential delegatee reward 236 * | | '-. subnetDelegator 237 * | | '-. list 238 * | | '-- txID -> potential reward 239 * | |-. pending 240 * | | |-. validator 241 * | | | '-. list 242 * | | | '-- txID -> nil 243 * | | |-. delegator 244 * | | | '-. list 245 * | | | '-- txID -> nil 246 * | | |-. subnetValidator 247 * | | | '-. list 248 * | | | '-- txID -> nil 249 * | | '-. subnetDelegator 250 * | | '-. list 251 * | | '-- txID -> nil 252 * | |-. weight diffs 253 * | | '-- subnet+height+nodeID -> weightChange 254 * | '-. pub key diffs 255 * | '-- subnet+height+nodeID -> uncompressed public key or nil 256 * |-. blockIDs 257 * | '-- height -> blockID 258 * |-. blocks 259 * | '-- blockID -> block bytes 260 * |-. txs 261 * | '-- txID -> tx bytes + tx status 262 * |- rewardUTXOs 263 * | '-. txID 264 * | '-. list 265 * | '-- utxoID -> utxo bytes 266 * |- utxos 267 * | '-- utxoDB 268 * |-. subnets 269 * | '-. list 270 * | '-- txID -> nil 271 * |-. subnetOwners 272 * | '-. subnetID -> owner 273 * |-. chains 274 * | '-. subnetID 275 * | '-. list 276 * | '-- txID -> nil 277 * '-. singletons 278 * |-- initializedKey -> nil 279 * |-- blocksReindexedKey -> nil 280 * |-- timestampKey -> timestamp 281 * |-- feeStateKey -> feeState 282 * |-- currentSupplyKey -> currentSupply 283 * |-- lastAcceptedKey -> lastAccepted 284 * '-- heightsIndexKey -> startIndexHeight + endIndexHeight 285 */ 286 type state struct { 287 validatorState 288 289 validators validators.Manager 290 ctx *snow.Context 291 upgrades upgrade.Config 292 metrics metrics.Metrics 293 rewards reward.Calculator 294 295 baseDB *versiondb.Database 296 297 currentStakers *baseStakers 298 pendingStakers *baseStakers 299 300 currentHeight uint64 301 302 addedBlockIDs map[uint64]ids.ID // map of height -> blockID 303 blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID; if the entry is ids.Empty, it is not in the database 304 blockIDDB database.Database 305 306 addedBlocks map[ids.ID]block.Block // map of blockID -> Block 307 blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block; if the entry is nil, it is not in the database 308 blockDB database.Database 309 310 validatorsDB database.Database 311 currentValidatorsDB database.Database 312 currentValidatorBaseDB database.Database 313 currentValidatorList linkeddb.LinkedDB 314 currentDelegatorBaseDB database.Database 315 currentDelegatorList linkeddb.LinkedDB 316 currentSubnetValidatorBaseDB database.Database 317 currentSubnetValidatorList linkeddb.LinkedDB 318 currentSubnetDelegatorBaseDB database.Database 319 currentSubnetDelegatorList linkeddb.LinkedDB 320 pendingValidatorsDB database.Database 321 pendingValidatorBaseDB database.Database 322 pendingValidatorList linkeddb.LinkedDB 323 pendingDelegatorBaseDB database.Database 324 pendingDelegatorList linkeddb.LinkedDB 325 pendingSubnetValidatorBaseDB database.Database 326 pendingSubnetValidatorList linkeddb.LinkedDB 327 pendingSubnetDelegatorBaseDB database.Database 328 pendingSubnetDelegatorList linkeddb.LinkedDB 329 330 validatorWeightDiffsDB database.Database 331 validatorPublicKeyDiffsDB database.Database 332 333 addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} 334 txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}; if the entry is nil, it is not in the database 335 txDB database.Database 336 337 addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO 338 rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO 339 rewardUTXODB database.Database 340 341 modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO; if the UTXO is nil, it has been removed 342 utxoDB database.Database 343 utxoState avax.UTXOState 344 345 cachedSubnetIDs []ids.ID // nil if the subnets haven't been loaded 346 addedSubnetIDs []ids.ID 347 subnetBaseDB database.Database 348 subnetDB linkeddb.LinkedDB 349 350 subnetOwners map[ids.ID]fx.Owner // map of subnetID -> owner 351 subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner; if the entry is nil, it is not in the database 352 subnetOwnerDB database.Database 353 354 subnetManagers map[ids.ID]chainIDAndAddr // map of subnetID -> manager of the subnet 355 subnetManagerCache cache.Cacher[ids.ID, chainIDAndAddr] // cache of subnetID -> manager 356 subnetManagerDB database.Database 357 358 transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx 359 transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx; if the entry is nil, it is not in the database 360 transformedSubnetDB database.Database 361 362 modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply 363 supplyCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply; if the entry is nil, it is not in the database 364 supplyDB database.Database 365 366 addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet 367 chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx 368 chainDBCache cache.Cacher[ids.ID, linkeddb.LinkedDB] // cache of subnetID -> linkedDB 369 chainDB database.Database 370 371 // The persisted fields represent the current database value 372 timestamp, persistedTimestamp time.Time 373 feeState, persistedFeeState gas.State 374 currentSupply, persistedCurrentSupply uint64 375 // [lastAccepted] is the most recently accepted block. 376 lastAccepted, persistedLastAccepted ids.ID 377 // TODO: Remove indexedHeights once v1.11.3 has been released. 378 indexedHeights *heightRange 379 singletonDB database.Database 380 } 381 382 // heightRange is used to track which heights are safe to use the native DB 383 // iterator for querying validator diffs. 384 // 385 // TODO: Remove once we are guaranteed nodes can not rollback to not support the 386 // new indexing mechanism. 387 type heightRange struct { 388 LowerBound uint64 `serialize:"true"` 389 UpperBound uint64 `serialize:"true"` 390 } 391 392 type ValidatorWeightDiff struct { 393 Decrease bool `serialize:"true"` 394 Amount uint64 `serialize:"true"` 395 } 396 397 func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { 398 if v.Decrease == negative { 399 var err error 400 v.Amount, err = safemath.Add(v.Amount, amount) 401 return err 402 } 403 404 if v.Amount > amount { 405 v.Amount -= amount 406 } else { 407 v.Amount = safemath.AbsDiff(v.Amount, amount) 408 v.Decrease = negative 409 } 410 return nil 411 } 412 413 type txBytesAndStatus struct { 414 Tx []byte `serialize:"true"` 415 Status status.Status `serialize:"true"` 416 } 417 418 type txAndStatus struct { 419 tx *txs.Tx 420 status status.Status 421 } 422 423 type fxOwnerAndSize struct { 424 owner fx.Owner 425 size int 426 } 427 428 type chainIDAndAddr struct { 429 ChainID ids.ID `serialize:"true"` 430 Addr []byte `serialize:"true"` 431 } 432 433 func txSize(_ ids.ID, tx *txs.Tx) int { 434 if tx == nil { 435 return ids.IDLen + constants.PointerOverhead 436 } 437 return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead 438 } 439 440 func txAndStatusSize(_ ids.ID, t *txAndStatus) int { 441 if t == nil { 442 return ids.IDLen + constants.PointerOverhead 443 } 444 return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead 445 } 446 447 func blockSize(_ ids.ID, blk block.Block) int { 448 if blk == nil { 449 return ids.IDLen + constants.PointerOverhead 450 } 451 return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead 452 } 453 454 func New( 455 db database.Database, 456 genesisBytes []byte, 457 metricsReg prometheus.Registerer, 458 validators validators.Manager, 459 upgrades upgrade.Config, 460 execCfg *config.ExecutionConfig, 461 ctx *snow.Context, 462 metrics metrics.Metrics, 463 rewards reward.Calculator, 464 ) (State, error) { 465 blockIDCache, err := metercacher.New[uint64, ids.ID]( 466 "block_id_cache", 467 metricsReg, 468 &cache.LRU[uint64, ids.ID]{Size: execCfg.BlockIDCacheSize}, 469 ) 470 if err != nil { 471 return nil, err 472 } 473 474 blockCache, err := metercacher.New[ids.ID, block.Block]( 475 "block_cache", 476 metricsReg, 477 cache.NewSizedLRU[ids.ID, block.Block](execCfg.BlockCacheSize, blockSize), 478 ) 479 if err != nil { 480 return nil, err 481 } 482 483 baseDB := versiondb.New(db) 484 485 validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB) 486 487 currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB) 488 currentValidatorBaseDB := prefixdb.New(ValidatorPrefix, currentValidatorsDB) 489 currentDelegatorBaseDB := prefixdb.New(DelegatorPrefix, currentValidatorsDB) 490 currentSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, currentValidatorsDB) 491 currentSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, currentValidatorsDB) 492 493 pendingValidatorsDB := prefixdb.New(PendingPrefix, validatorsDB) 494 pendingValidatorBaseDB := prefixdb.New(ValidatorPrefix, pendingValidatorsDB) 495 pendingDelegatorBaseDB := prefixdb.New(DelegatorPrefix, pendingValidatorsDB) 496 pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB) 497 pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB) 498 499 validatorWeightDiffsDB := prefixdb.New(ValidatorWeightDiffsPrefix, validatorsDB) 500 validatorPublicKeyDiffsDB := prefixdb.New(ValidatorPublicKeyDiffsPrefix, validatorsDB) 501 502 txCache, err := metercacher.New( 503 "tx_cache", 504 metricsReg, 505 cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), 506 ) 507 if err != nil { 508 return nil, err 509 } 510 511 rewardUTXODB := prefixdb.New(RewardUTXOsPrefix, baseDB) 512 rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( 513 "reward_utxos_cache", 514 metricsReg, 515 &cache.LRU[ids.ID, []*avax.UTXO]{Size: execCfg.RewardUTXOsCacheSize}, 516 ) 517 if err != nil { 518 return nil, err 519 } 520 521 utxoDB := prefixdb.New(UTXOPrefix, baseDB) 522 utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg, execCfg.ChecksumsEnabled) 523 if err != nil { 524 return nil, err 525 } 526 527 subnetBaseDB := prefixdb.New(SubnetPrefix, baseDB) 528 529 subnetOwnerDB := prefixdb.New(SubnetOwnerPrefix, baseDB) 530 subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize]( 531 "subnet_owner_cache", 532 metricsReg, 533 cache.NewSizedLRU[ids.ID, fxOwnerAndSize](execCfg.FxOwnerCacheSize, func(_ ids.ID, f fxOwnerAndSize) int { 534 return ids.IDLen + f.size 535 }), 536 ) 537 if err != nil { 538 return nil, err 539 } 540 541 subnetManagerDB := prefixdb.New(SubnetManagerPrefix, baseDB) 542 subnetManagerCache, err := metercacher.New[ids.ID, chainIDAndAddr]( 543 "subnet_manager_cache", 544 metricsReg, 545 cache.NewSizedLRU[ids.ID, chainIDAndAddr](execCfg.SubnetManagerCacheSize, func(_ ids.ID, f chainIDAndAddr) int { 546 return 2*ids.IDLen + len(f.Addr) 547 }), 548 ) 549 if err != nil { 550 return nil, err 551 } 552 553 transformedSubnetCache, err := metercacher.New( 554 "transformed_subnet_cache", 555 metricsReg, 556 cache.NewSizedLRU[ids.ID, *txs.Tx](execCfg.TransformedSubnetTxCacheSize, txSize), 557 ) 558 if err != nil { 559 return nil, err 560 } 561 562 supplyCache, err := metercacher.New[ids.ID, *uint64]( 563 "supply_cache", 564 metricsReg, 565 &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, 566 ) 567 if err != nil { 568 return nil, err 569 } 570 571 chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( 572 "chain_cache", 573 metricsReg, 574 &cache.LRU[ids.ID, []*txs.Tx]{Size: execCfg.ChainCacheSize}, 575 ) 576 if err != nil { 577 return nil, err 578 } 579 580 chainDBCache, err := metercacher.New[ids.ID, linkeddb.LinkedDB]( 581 "chain_db_cache", 582 metricsReg, 583 &cache.LRU[ids.ID, linkeddb.LinkedDB]{Size: execCfg.ChainDBCacheSize}, 584 ) 585 if err != nil { 586 return nil, err 587 } 588 589 s := &state{ 590 validatorState: newValidatorState(), 591 592 validators: validators, 593 ctx: ctx, 594 upgrades: upgrades, 595 metrics: metrics, 596 rewards: rewards, 597 baseDB: baseDB, 598 599 addedBlockIDs: make(map[uint64]ids.ID), 600 blockIDCache: blockIDCache, 601 blockIDDB: prefixdb.New(BlockIDPrefix, baseDB), 602 603 addedBlocks: make(map[ids.ID]block.Block), 604 blockCache: blockCache, 605 blockDB: prefixdb.New(BlockPrefix, baseDB), 606 607 currentStakers: newBaseStakers(), 608 pendingStakers: newBaseStakers(), 609 610 validatorsDB: validatorsDB, 611 currentValidatorsDB: currentValidatorsDB, 612 currentValidatorBaseDB: currentValidatorBaseDB, 613 currentValidatorList: linkeddb.NewDefault(currentValidatorBaseDB), 614 currentDelegatorBaseDB: currentDelegatorBaseDB, 615 currentDelegatorList: linkeddb.NewDefault(currentDelegatorBaseDB), 616 currentSubnetValidatorBaseDB: currentSubnetValidatorBaseDB, 617 currentSubnetValidatorList: linkeddb.NewDefault(currentSubnetValidatorBaseDB), 618 currentSubnetDelegatorBaseDB: currentSubnetDelegatorBaseDB, 619 currentSubnetDelegatorList: linkeddb.NewDefault(currentSubnetDelegatorBaseDB), 620 pendingValidatorsDB: pendingValidatorsDB, 621 pendingValidatorBaseDB: pendingValidatorBaseDB, 622 pendingValidatorList: linkeddb.NewDefault(pendingValidatorBaseDB), 623 pendingDelegatorBaseDB: pendingDelegatorBaseDB, 624 pendingDelegatorList: linkeddb.NewDefault(pendingDelegatorBaseDB), 625 pendingSubnetValidatorBaseDB: pendingSubnetValidatorBaseDB, 626 pendingSubnetValidatorList: linkeddb.NewDefault(pendingSubnetValidatorBaseDB), 627 pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB, 628 pendingSubnetDelegatorList: linkeddb.NewDefault(pendingSubnetDelegatorBaseDB), 629 validatorWeightDiffsDB: validatorWeightDiffsDB, 630 validatorPublicKeyDiffsDB: validatorPublicKeyDiffsDB, 631 632 addedTxs: make(map[ids.ID]*txAndStatus), 633 txDB: prefixdb.New(TxPrefix, baseDB), 634 txCache: txCache, 635 636 addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), 637 rewardUTXODB: rewardUTXODB, 638 rewardUTXOsCache: rewardUTXOsCache, 639 640 modifiedUTXOs: make(map[ids.ID]*avax.UTXO), 641 utxoDB: utxoDB, 642 utxoState: utxoState, 643 644 subnetBaseDB: subnetBaseDB, 645 subnetDB: linkeddb.NewDefault(subnetBaseDB), 646 647 subnetOwners: make(map[ids.ID]fx.Owner), 648 subnetOwnerDB: subnetOwnerDB, 649 subnetOwnerCache: subnetOwnerCache, 650 651 subnetManagers: make(map[ids.ID]chainIDAndAddr), 652 subnetManagerDB: subnetManagerDB, 653 subnetManagerCache: subnetManagerCache, 654 655 transformedSubnets: make(map[ids.ID]*txs.Tx), 656 transformedSubnetCache: transformedSubnetCache, 657 transformedSubnetDB: prefixdb.New(TransformedSubnetPrefix, baseDB), 658 659 modifiedSupplies: make(map[ids.ID]uint64), 660 supplyCache: supplyCache, 661 supplyDB: prefixdb.New(SupplyPrefix, baseDB), 662 663 addedChains: make(map[ids.ID][]*txs.Tx), 664 chainDB: prefixdb.New(ChainPrefix, baseDB), 665 chainCache: chainCache, 666 chainDBCache: chainDBCache, 667 668 singletonDB: prefixdb.New(SingletonPrefix, baseDB), 669 } 670 671 if err := s.sync(genesisBytes); err != nil { 672 return nil, errors.Join( 673 err, 674 s.Close(), 675 ) 676 } 677 678 return s, nil 679 } 680 681 func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { 682 return s.currentStakers.GetValidator(subnetID, nodeID) 683 } 684 685 func (s *state) PutCurrentValidator(staker *Staker) error { 686 s.currentStakers.PutValidator(staker) 687 return nil 688 } 689 690 func (s *state) DeleteCurrentValidator(staker *Staker) { 691 s.currentStakers.DeleteValidator(staker) 692 } 693 694 func (s *state) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (iterator.Iterator[*Staker], error) { 695 return s.currentStakers.GetDelegatorIterator(subnetID, nodeID), nil 696 } 697 698 func (s *state) PutCurrentDelegator(staker *Staker) { 699 s.currentStakers.PutDelegator(staker) 700 } 701 702 func (s *state) DeleteCurrentDelegator(staker *Staker) { 703 s.currentStakers.DeleteDelegator(staker) 704 } 705 706 func (s *state) GetCurrentStakerIterator() (iterator.Iterator[*Staker], error) { 707 return s.currentStakers.GetStakerIterator(), nil 708 } 709 710 func (s *state) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { 711 return s.pendingStakers.GetValidator(subnetID, nodeID) 712 } 713 714 func (s *state) PutPendingValidator(staker *Staker) error { 715 s.pendingStakers.PutValidator(staker) 716 return nil 717 } 718 719 func (s *state) DeletePendingValidator(staker *Staker) { 720 s.pendingStakers.DeleteValidator(staker) 721 } 722 723 func (s *state) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (iterator.Iterator[*Staker], error) { 724 return s.pendingStakers.GetDelegatorIterator(subnetID, nodeID), nil 725 } 726 727 func (s *state) PutPendingDelegator(staker *Staker) { 728 s.pendingStakers.PutDelegator(staker) 729 } 730 731 func (s *state) DeletePendingDelegator(staker *Staker) { 732 s.pendingStakers.DeleteDelegator(staker) 733 } 734 735 func (s *state) GetPendingStakerIterator() (iterator.Iterator[*Staker], error) { 736 return s.pendingStakers.GetStakerIterator(), nil 737 } 738 739 func (s *state) GetSubnetIDs() ([]ids.ID, error) { 740 if s.cachedSubnetIDs != nil { 741 return s.cachedSubnetIDs, nil 742 } 743 744 subnetDBIt := s.subnetDB.NewIterator() 745 defer subnetDBIt.Release() 746 747 subnetIDs := []ids.ID{} 748 for subnetDBIt.Next() { 749 subnetIDBytes := subnetDBIt.Key() 750 subnetID, err := ids.ToID(subnetIDBytes) 751 if err != nil { 752 return nil, err 753 } 754 subnetIDs = append(subnetIDs, subnetID) 755 } 756 if err := subnetDBIt.Error(); err != nil { 757 return nil, err 758 } 759 subnetIDs = append(subnetIDs, s.addedSubnetIDs...) 760 s.cachedSubnetIDs = subnetIDs 761 return subnetIDs, nil 762 } 763 764 func (s *state) AddSubnet(subnetID ids.ID) { 765 s.addedSubnetIDs = append(s.addedSubnetIDs, subnetID) 766 if s.cachedSubnetIDs != nil { 767 s.cachedSubnetIDs = append(s.cachedSubnetIDs, subnetID) 768 } 769 } 770 771 func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { 772 if owner, exists := s.subnetOwners[subnetID]; exists { 773 return owner, nil 774 } 775 776 if ownerAndSize, cached := s.subnetOwnerCache.Get(subnetID); cached { 777 if ownerAndSize.owner == nil { 778 return nil, database.ErrNotFound 779 } 780 return ownerAndSize.owner, nil 781 } 782 783 ownerBytes, err := s.subnetOwnerDB.Get(subnetID[:]) 784 if err == nil { 785 var owner fx.Owner 786 if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil { 787 return nil, err 788 } 789 s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ 790 owner: owner, 791 size: len(ownerBytes), 792 }) 793 return owner, nil 794 } 795 if err != database.ErrNotFound { 796 return nil, err 797 } 798 799 subnetIntf, _, err := s.GetTx(subnetID) 800 if err != nil { 801 if err == database.ErrNotFound { 802 s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{}) 803 } 804 return nil, err 805 } 806 807 subnet, ok := subnetIntf.Unsigned.(*txs.CreateSubnetTx) 808 if !ok { 809 return nil, fmt.Errorf("%q %w", subnetID, errIsNotSubnet) 810 } 811 812 s.SetSubnetOwner(subnetID, subnet.Owner) 813 return subnet.Owner, nil 814 } 815 816 func (s *state) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { 817 s.subnetOwners[subnetID] = owner 818 } 819 820 func (s *state) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { 821 if chainIDAndAddr, exists := s.subnetManagers[subnetID]; exists { 822 return chainIDAndAddr.ChainID, chainIDAndAddr.Addr, nil 823 } 824 825 if chainIDAndAddr, cached := s.subnetManagerCache.Get(subnetID); cached { 826 return chainIDAndAddr.ChainID, chainIDAndAddr.Addr, nil 827 } 828 829 chainIDAndAddrBytes, err := s.subnetManagerDB.Get(subnetID[:]) 830 if err != nil { 831 return ids.Empty, nil, err 832 } 833 834 var manager chainIDAndAddr 835 if _, err := block.GenesisCodec.Unmarshal(chainIDAndAddrBytes, &manager); err != nil { 836 return ids.Empty, nil, err 837 } 838 s.subnetManagerCache.Put(subnetID, manager) 839 return manager.ChainID, manager.Addr, nil 840 } 841 842 func (s *state) SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) { 843 s.subnetManagers[subnetID] = chainIDAndAddr{ 844 ChainID: chainID, 845 Addr: addr, 846 } 847 } 848 849 func (s *state) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { 850 if tx, exists := s.transformedSubnets[subnetID]; exists { 851 return tx, nil 852 } 853 854 if tx, cached := s.transformedSubnetCache.Get(subnetID); cached { 855 if tx == nil { 856 return nil, database.ErrNotFound 857 } 858 return tx, nil 859 } 860 861 transformSubnetTxID, err := database.GetID(s.transformedSubnetDB, subnetID[:]) 862 if err == database.ErrNotFound { 863 s.transformedSubnetCache.Put(subnetID, nil) 864 return nil, database.ErrNotFound 865 } 866 if err != nil { 867 return nil, err 868 } 869 870 transformSubnetTx, _, err := s.GetTx(transformSubnetTxID) 871 if err != nil { 872 return nil, err 873 } 874 s.transformedSubnetCache.Put(subnetID, transformSubnetTx) 875 return transformSubnetTx, nil 876 } 877 878 func (s *state) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { 879 transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx) 880 s.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf 881 } 882 883 func (s *state) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { 884 if chains, cached := s.chainCache.Get(subnetID); cached { 885 return chains, nil 886 } 887 chainDB := s.getChainDB(subnetID) 888 chainDBIt := chainDB.NewIterator() 889 defer chainDBIt.Release() 890 891 txs := []*txs.Tx(nil) 892 for chainDBIt.Next() { 893 chainIDBytes := chainDBIt.Key() 894 chainID, err := ids.ToID(chainIDBytes) 895 if err != nil { 896 return nil, err 897 } 898 chainTx, _, err := s.GetTx(chainID) 899 if err != nil { 900 return nil, err 901 } 902 txs = append(txs, chainTx) 903 } 904 if err := chainDBIt.Error(); err != nil { 905 return nil, err 906 } 907 txs = append(txs, s.addedChains[subnetID]...) 908 s.chainCache.Put(subnetID, txs) 909 return txs, nil 910 } 911 912 func (s *state) AddChain(createChainTxIntf *txs.Tx) { 913 createChainTx := createChainTxIntf.Unsigned.(*txs.CreateChainTx) 914 subnetID := createChainTx.SubnetID 915 s.addedChains[subnetID] = append(s.addedChains[subnetID], createChainTxIntf) 916 if chains, cached := s.chainCache.Get(subnetID); cached { 917 chains = append(chains, createChainTxIntf) 918 s.chainCache.Put(subnetID, chains) 919 } 920 } 921 922 func (s *state) getChainDB(subnetID ids.ID) linkeddb.LinkedDB { 923 if chainDB, cached := s.chainDBCache.Get(subnetID); cached { 924 return chainDB 925 } 926 rawChainDB := prefixdb.New(subnetID[:], s.chainDB) 927 chainDB := linkeddb.NewDefault(rawChainDB) 928 s.chainDBCache.Put(subnetID, chainDB) 929 return chainDB 930 } 931 932 func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { 933 if tx, exists := s.addedTxs[txID]; exists { 934 return tx.tx, tx.status, nil 935 } 936 if tx, cached := s.txCache.Get(txID); cached { 937 if tx == nil { 938 return nil, status.Unknown, database.ErrNotFound 939 } 940 return tx.tx, tx.status, nil 941 } 942 txBytes, err := s.txDB.Get(txID[:]) 943 if err == database.ErrNotFound { 944 s.txCache.Put(txID, nil) 945 return nil, status.Unknown, database.ErrNotFound 946 } else if err != nil { 947 return nil, status.Unknown, err 948 } 949 950 stx := txBytesAndStatus{} 951 if _, err := txs.GenesisCodec.Unmarshal(txBytes, &stx); err != nil { 952 return nil, status.Unknown, err 953 } 954 955 tx, err := txs.Parse(txs.GenesisCodec, stx.Tx) 956 if err != nil { 957 return nil, status.Unknown, err 958 } 959 960 ptx := &txAndStatus{ 961 tx: tx, 962 status: stx.Status, 963 } 964 965 s.txCache.Put(txID, ptx) 966 return ptx.tx, ptx.status, nil 967 } 968 969 func (s *state) AddTx(tx *txs.Tx, status status.Status) { 970 s.addedTxs[tx.ID()] = &txAndStatus{ 971 tx: tx, 972 status: status, 973 } 974 } 975 976 func (s *state) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { 977 if utxos, exists := s.addedRewardUTXOs[txID]; exists { 978 return utxos, nil 979 } 980 if utxos, exists := s.rewardUTXOsCache.Get(txID); exists { 981 return utxos, nil 982 } 983 984 rawTxDB := prefixdb.New(txID[:], s.rewardUTXODB) 985 txDB := linkeddb.NewDefault(rawTxDB) 986 it := txDB.NewIterator() 987 defer it.Release() 988 989 utxos := []*avax.UTXO(nil) 990 for it.Next() { 991 utxo := &avax.UTXO{} 992 if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { 993 return nil, err 994 } 995 utxos = append(utxos, utxo) 996 } 997 if err := it.Error(); err != nil { 998 return nil, err 999 } 1000 1001 s.rewardUTXOsCache.Put(txID, utxos) 1002 return utxos, nil 1003 } 1004 1005 func (s *state) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { 1006 s.addedRewardUTXOs[txID] = append(s.addedRewardUTXOs[txID], utxo) 1007 } 1008 1009 func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { 1010 if utxo, exists := s.modifiedUTXOs[utxoID]; exists { 1011 if utxo == nil { 1012 return nil, database.ErrNotFound 1013 } 1014 return utxo, nil 1015 } 1016 return s.utxoState.GetUTXO(utxoID) 1017 } 1018 1019 func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { 1020 return s.utxoState.UTXOIDs(addr, start, limit) 1021 } 1022 1023 func (s *state) AddUTXO(utxo *avax.UTXO) { 1024 s.modifiedUTXOs[utxo.InputID()] = utxo 1025 } 1026 1027 func (s *state) DeleteUTXO(utxoID ids.ID) { 1028 s.modifiedUTXOs[utxoID] = nil 1029 } 1030 1031 func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { 1032 staker, err := s.currentStakers.GetValidator(subnetID, nodeID) 1033 if err != nil { 1034 return time.Time{}, err 1035 } 1036 return staker.StartTime, nil 1037 } 1038 1039 func (s *state) GetTimestamp() time.Time { 1040 return s.timestamp 1041 } 1042 1043 func (s *state) SetTimestamp(tm time.Time) { 1044 s.timestamp = tm 1045 } 1046 1047 func (s *state) GetFeeState() gas.State { 1048 return s.feeState 1049 } 1050 1051 func (s *state) SetFeeState(feeState gas.State) { 1052 s.feeState = feeState 1053 } 1054 1055 func (s *state) GetLastAccepted() ids.ID { 1056 return s.lastAccepted 1057 } 1058 1059 func (s *state) SetLastAccepted(lastAccepted ids.ID) { 1060 s.lastAccepted = lastAccepted 1061 } 1062 1063 func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { 1064 if subnetID == constants.PrimaryNetworkID { 1065 return s.currentSupply, nil 1066 } 1067 1068 supply, ok := s.modifiedSupplies[subnetID] 1069 if ok { 1070 return supply, nil 1071 } 1072 1073 cachedSupply, ok := s.supplyCache.Get(subnetID) 1074 if ok { 1075 if cachedSupply == nil { 1076 return 0, database.ErrNotFound 1077 } 1078 return *cachedSupply, nil 1079 } 1080 1081 supply, err := database.GetUInt64(s.supplyDB, subnetID[:]) 1082 if err == database.ErrNotFound { 1083 s.supplyCache.Put(subnetID, nil) 1084 return 0, database.ErrNotFound 1085 } 1086 if err != nil { 1087 return 0, err 1088 } 1089 1090 s.supplyCache.Put(subnetID, &supply) 1091 return supply, nil 1092 } 1093 1094 func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { 1095 if subnetID == constants.PrimaryNetworkID { 1096 s.currentSupply = cs 1097 } else { 1098 s.modifiedSupplies[subnetID] = cs 1099 } 1100 } 1101 1102 func (s *state) ApplyValidatorWeightDiffs( 1103 ctx context.Context, 1104 validators map[ids.NodeID]*validators.GetValidatorOutput, 1105 startHeight uint64, 1106 endHeight uint64, 1107 subnetID ids.ID, 1108 ) error { 1109 diffIter := s.validatorWeightDiffsDB.NewIteratorWithStartAndPrefix( 1110 marshalStartDiffKey(subnetID, startHeight), 1111 subnetID[:], 1112 ) 1113 defer diffIter.Release() 1114 1115 prevHeight := startHeight + 1 1116 for diffIter.Next() { 1117 if err := ctx.Err(); err != nil { 1118 return err 1119 } 1120 1121 _, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key()) 1122 if err != nil { 1123 return err 1124 } 1125 1126 if parsedHeight > prevHeight { 1127 s.ctx.Log.Error("unexpected parsed height", 1128 zap.Stringer("subnetID", subnetID), 1129 zap.Uint64("parsedHeight", parsedHeight), 1130 zap.Stringer("nodeID", nodeID), 1131 zap.Uint64("prevHeight", prevHeight), 1132 zap.Uint64("startHeight", startHeight), 1133 zap.Uint64("endHeight", endHeight), 1134 ) 1135 } 1136 1137 // If the parsedHeight is less than our target endHeight, then we have 1138 // fully processed the diffs from startHeight through endHeight. 1139 if parsedHeight < endHeight { 1140 return diffIter.Error() 1141 } 1142 1143 prevHeight = parsedHeight 1144 1145 weightDiff, err := unmarshalWeightDiff(diffIter.Value()) 1146 if err != nil { 1147 return err 1148 } 1149 1150 if err := applyWeightDiff(validators, nodeID, weightDiff); err != nil { 1151 return err 1152 } 1153 } 1154 return diffIter.Error() 1155 } 1156 1157 func applyWeightDiff( 1158 vdrs map[ids.NodeID]*validators.GetValidatorOutput, 1159 nodeID ids.NodeID, 1160 weightDiff *ValidatorWeightDiff, 1161 ) error { 1162 vdr, ok := vdrs[nodeID] 1163 if !ok { 1164 // This node isn't in the current validator set. 1165 vdr = &validators.GetValidatorOutput{ 1166 NodeID: nodeID, 1167 } 1168 vdrs[nodeID] = vdr 1169 } 1170 1171 // The weight of this node changed at this block. 1172 var err error 1173 if weightDiff.Decrease { 1174 // The validator's weight was decreased at this block, so in the 1175 // prior block it was higher. 1176 vdr.Weight, err = safemath.Add(vdr.Weight, weightDiff.Amount) 1177 } else { 1178 // The validator's weight was increased at this block, so in the 1179 // prior block it was lower. 1180 vdr.Weight, err = safemath.Sub(vdr.Weight, weightDiff.Amount) 1181 } 1182 if err != nil { 1183 return err 1184 } 1185 1186 if vdr.Weight == 0 { 1187 // The validator's weight was 0 before this block so they weren't in the 1188 // validator set. 1189 delete(vdrs, nodeID) 1190 } 1191 return nil 1192 } 1193 1194 func (s *state) ApplyValidatorPublicKeyDiffs( 1195 ctx context.Context, 1196 validators map[ids.NodeID]*validators.GetValidatorOutput, 1197 startHeight uint64, 1198 endHeight uint64, 1199 ) error { 1200 diffIter := s.validatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix( 1201 marshalStartDiffKey(constants.PrimaryNetworkID, startHeight), 1202 constants.PrimaryNetworkID[:], 1203 ) 1204 defer diffIter.Release() 1205 1206 for diffIter.Next() { 1207 if err := ctx.Err(); err != nil { 1208 return err 1209 } 1210 1211 _, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key()) 1212 if err != nil { 1213 return err 1214 } 1215 // If the parsedHeight is less than our target endHeight, then we have 1216 // fully processed the diffs from startHeight through endHeight. 1217 if parsedHeight < endHeight { 1218 break 1219 } 1220 1221 vdr, ok := validators[nodeID] 1222 if !ok { 1223 continue 1224 } 1225 1226 pkBytes := diffIter.Value() 1227 if len(pkBytes) == 0 { 1228 vdr.PublicKey = nil 1229 continue 1230 } 1231 1232 vdr.PublicKey = bls.PublicKeyFromValidUncompressedBytes(pkBytes) 1233 } 1234 1235 // Note: this does not fallback to the linkeddb index because the linkeddb 1236 // index does not contain entries for when to remove the public key. 1237 // 1238 // Nodes may see inconsistent public keys for heights before the new public 1239 // key index was populated. 1240 return diffIter.Error() 1241 } 1242 1243 func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { 1244 genesisBlkID := genesisBlk.ID() 1245 s.SetLastAccepted(genesisBlkID) 1246 s.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) 1247 s.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) 1248 s.AddStatelessBlock(genesisBlk) 1249 1250 // Persist UTXOs that exist at genesis 1251 for _, utxo := range genesis.UTXOs { 1252 avaxUTXO := utxo.UTXO 1253 s.AddUTXO(&avaxUTXO) 1254 } 1255 1256 // Persist primary network validator set at genesis 1257 for _, vdrTx := range genesis.Validators { 1258 // We expect genesis validator txs to be either AddValidatorTx or 1259 // AddPermissionlessValidatorTx. 1260 // 1261 // TODO: Enforce stricter type check 1262 validatorTx, ok := vdrTx.Unsigned.(txs.ScheduledStaker) 1263 if !ok { 1264 return fmt.Errorf("expected a scheduled staker but got %T", vdrTx.Unsigned) 1265 } 1266 1267 stakeAmount := validatorTx.Weight() 1268 // Note: We use [StartTime()] here because genesis transactions are 1269 // guaranteed to be pre-Durango activation. 1270 startTime := validatorTx.StartTime() 1271 stakeDuration := validatorTx.EndTime().Sub(startTime) 1272 currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID) 1273 if err != nil { 1274 return err 1275 } 1276 1277 potentialReward := s.rewards.Calculate( 1278 stakeDuration, 1279 stakeAmount, 1280 currentSupply, 1281 ) 1282 newCurrentSupply, err := safemath.Add(currentSupply, potentialReward) 1283 if err != nil { 1284 return err 1285 } 1286 1287 staker, err := NewCurrentStaker(vdrTx.ID(), validatorTx, startTime, potentialReward) 1288 if err != nil { 1289 return err 1290 } 1291 1292 if err := s.PutCurrentValidator(staker); err != nil { 1293 return err 1294 } 1295 s.AddTx(vdrTx, status.Committed) 1296 s.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) 1297 } 1298 1299 for _, chain := range genesis.Chains { 1300 unsignedChain, ok := chain.Unsigned.(*txs.CreateChainTx) 1301 if !ok { 1302 return fmt.Errorf("expected tx type *txs.CreateChainTx but got %T", chain.Unsigned) 1303 } 1304 1305 // Ensure all chains that the genesis bytes say to create have the right 1306 // network ID 1307 if unsignedChain.NetworkID != s.ctx.NetworkID { 1308 return avax.ErrWrongNetworkID 1309 } 1310 1311 s.AddChain(chain) 1312 s.AddTx(chain, status.Committed) 1313 } 1314 1315 // updateValidators is set to false here to maintain the invariant that the 1316 // primary network's validator set is empty before the validator sets are 1317 // initialized. 1318 return s.write(false /*=updateValidators*/, 0) 1319 } 1320 1321 // Load pulls data previously stored on disk that is expected to be in memory. 1322 func (s *state) load() error { 1323 return errors.Join( 1324 s.loadMetadata(), 1325 s.loadCurrentValidators(), 1326 s.loadPendingValidators(), 1327 s.initValidatorSets(), 1328 ) 1329 } 1330 1331 func (s *state) loadMetadata() error { 1332 timestamp, err := database.GetTimestamp(s.singletonDB, TimestampKey) 1333 if err != nil { 1334 return err 1335 } 1336 s.persistedTimestamp = timestamp 1337 s.SetTimestamp(timestamp) 1338 1339 feeState, err := getFeeState(s.singletonDB) 1340 if err != nil { 1341 return err 1342 } 1343 s.persistedFeeState = feeState 1344 s.SetFeeState(feeState) 1345 1346 currentSupply, err := database.GetUInt64(s.singletonDB, CurrentSupplyKey) 1347 if err != nil { 1348 return err 1349 } 1350 s.persistedCurrentSupply = currentSupply 1351 s.SetCurrentSupply(constants.PrimaryNetworkID, currentSupply) 1352 1353 lastAccepted, err := database.GetID(s.singletonDB, LastAcceptedKey) 1354 if err != nil { 1355 return err 1356 } 1357 s.persistedLastAccepted = lastAccepted 1358 s.lastAccepted = lastAccepted 1359 1360 // Lookup the most recently indexed range on disk. If we haven't started 1361 // indexing the weights, then we keep the indexed heights as nil. 1362 indexedHeightsBytes, err := s.singletonDB.Get(HeightsIndexedKey) 1363 if err == database.ErrNotFound { 1364 return nil 1365 } 1366 if err != nil { 1367 return err 1368 } 1369 1370 indexedHeights := &heightRange{} 1371 _, err = block.GenesisCodec.Unmarshal(indexedHeightsBytes, indexedHeights) 1372 if err != nil { 1373 return err 1374 } 1375 1376 // If the indexed range is not up to date, then we will act as if the range 1377 // doesn't exist. 1378 lastAcceptedBlock, err := s.GetStatelessBlock(lastAccepted) 1379 if err != nil { 1380 return err 1381 } 1382 if indexedHeights.UpperBound != lastAcceptedBlock.Height() { 1383 return nil 1384 } 1385 s.indexedHeights = indexedHeights 1386 return nil 1387 } 1388 1389 func (s *state) loadCurrentValidators() error { 1390 s.currentStakers = newBaseStakers() 1391 1392 validatorIt := s.currentValidatorList.NewIterator() 1393 defer validatorIt.Release() 1394 for validatorIt.Next() { 1395 txIDBytes := validatorIt.Key() 1396 txID, err := ids.ToID(txIDBytes) 1397 if err != nil { 1398 return err 1399 } 1400 tx, _, err := s.GetTx(txID) 1401 if err != nil { 1402 return fmt.Errorf("failed loading validator transaction txID %s, %w", txID, err) 1403 } 1404 1405 stakerTx, ok := tx.Unsigned.(txs.Staker) 1406 if !ok { 1407 return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) 1408 } 1409 1410 metadataBytes := validatorIt.Value() 1411 metadata := &validatorMetadata{ 1412 txID: txID, 1413 } 1414 if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { 1415 // Populate [StakerStartTime] using the tx as a default in the event 1416 // it was added pre-durango and is not stored in the database. 1417 // 1418 // Note: We do not populate [LastUpdated] since it is expected to 1419 // always be present on disk. 1420 metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix()) 1421 } 1422 if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { 1423 return err 1424 } 1425 1426 staker, err := NewCurrentStaker( 1427 txID, 1428 stakerTx, 1429 time.Unix(int64(metadata.StakerStartTime), 0), 1430 metadata.PotentialReward) 1431 if err != nil { 1432 return err 1433 } 1434 1435 validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) 1436 validator.validator = staker 1437 1438 s.currentStakers.stakers.ReplaceOrInsert(staker) 1439 1440 s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata) 1441 } 1442 1443 subnetValidatorIt := s.currentSubnetValidatorList.NewIterator() 1444 defer subnetValidatorIt.Release() 1445 for subnetValidatorIt.Next() { 1446 txIDBytes := subnetValidatorIt.Key() 1447 txID, err := ids.ToID(txIDBytes) 1448 if err != nil { 1449 return err 1450 } 1451 tx, _, err := s.GetTx(txID) 1452 if err != nil { 1453 return err 1454 } 1455 1456 stakerTx, ok := tx.Unsigned.(txs.Staker) 1457 if !ok { 1458 return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) 1459 } 1460 1461 metadataBytes := subnetValidatorIt.Value() 1462 metadata := &validatorMetadata{ 1463 txID: txID, 1464 } 1465 if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { 1466 // Populate [StakerStartTime] and [LastUpdated] using the tx as a 1467 // default in the event they are not stored in the database. 1468 startTime := uint64(scheduledStakerTx.StartTime().Unix()) 1469 metadata.StakerStartTime = startTime 1470 metadata.LastUpdated = startTime 1471 } 1472 if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { 1473 return err 1474 } 1475 1476 staker, err := NewCurrentStaker( 1477 txID, 1478 stakerTx, 1479 time.Unix(int64(metadata.StakerStartTime), 0), 1480 metadata.PotentialReward, 1481 ) 1482 if err != nil { 1483 return err 1484 } 1485 validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) 1486 validator.validator = staker 1487 1488 s.currentStakers.stakers.ReplaceOrInsert(staker) 1489 1490 s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata) 1491 } 1492 1493 delegatorIt := s.currentDelegatorList.NewIterator() 1494 defer delegatorIt.Release() 1495 1496 subnetDelegatorIt := s.currentSubnetDelegatorList.NewIterator() 1497 defer subnetDelegatorIt.Release() 1498 1499 for _, delegatorIt := range []database.Iterator{delegatorIt, subnetDelegatorIt} { 1500 for delegatorIt.Next() { 1501 txIDBytes := delegatorIt.Key() 1502 txID, err := ids.ToID(txIDBytes) 1503 if err != nil { 1504 return err 1505 } 1506 tx, _, err := s.GetTx(txID) 1507 if err != nil { 1508 return err 1509 } 1510 1511 stakerTx, ok := tx.Unsigned.(txs.Staker) 1512 if !ok { 1513 return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) 1514 } 1515 1516 metadataBytes := delegatorIt.Value() 1517 metadata := &delegatorMetadata{ 1518 txID: txID, 1519 } 1520 if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { 1521 // Populate [StakerStartTime] using the tx as a default in the 1522 // event it was added pre-durango and is not stored in the 1523 // database. 1524 metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix()) 1525 } 1526 err = parseDelegatorMetadata(metadataBytes, metadata) 1527 if err != nil { 1528 return err 1529 } 1530 1531 staker, err := NewCurrentStaker( 1532 txID, 1533 stakerTx, 1534 time.Unix(int64(metadata.StakerStartTime), 0), 1535 metadata.PotentialReward, 1536 ) 1537 if err != nil { 1538 return err 1539 } 1540 1541 validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) 1542 if validator.delegators == nil { 1543 validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) 1544 } 1545 validator.delegators.ReplaceOrInsert(staker) 1546 1547 s.currentStakers.stakers.ReplaceOrInsert(staker) 1548 } 1549 } 1550 1551 return errors.Join( 1552 validatorIt.Error(), 1553 subnetValidatorIt.Error(), 1554 delegatorIt.Error(), 1555 subnetDelegatorIt.Error(), 1556 ) 1557 } 1558 1559 func (s *state) loadPendingValidators() error { 1560 s.pendingStakers = newBaseStakers() 1561 1562 validatorIt := s.pendingValidatorList.NewIterator() 1563 defer validatorIt.Release() 1564 1565 subnetValidatorIt := s.pendingSubnetValidatorList.NewIterator() 1566 defer subnetValidatorIt.Release() 1567 1568 for _, validatorIt := range []database.Iterator{validatorIt, subnetValidatorIt} { 1569 for validatorIt.Next() { 1570 txIDBytes := validatorIt.Key() 1571 txID, err := ids.ToID(txIDBytes) 1572 if err != nil { 1573 return err 1574 } 1575 tx, _, err := s.GetTx(txID) 1576 if err != nil { 1577 return err 1578 } 1579 1580 stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) 1581 if !ok { 1582 return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) 1583 } 1584 1585 staker, err := NewPendingStaker(txID, stakerTx) 1586 if err != nil { 1587 return err 1588 } 1589 1590 validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) 1591 validator.validator = staker 1592 1593 s.pendingStakers.stakers.ReplaceOrInsert(staker) 1594 } 1595 } 1596 1597 delegatorIt := s.pendingDelegatorList.NewIterator() 1598 defer delegatorIt.Release() 1599 1600 subnetDelegatorIt := s.pendingSubnetDelegatorList.NewIterator() 1601 defer subnetDelegatorIt.Release() 1602 1603 for _, delegatorIt := range []database.Iterator{delegatorIt, subnetDelegatorIt} { 1604 for delegatorIt.Next() { 1605 txIDBytes := delegatorIt.Key() 1606 txID, err := ids.ToID(txIDBytes) 1607 if err != nil { 1608 return err 1609 } 1610 tx, _, err := s.GetTx(txID) 1611 if err != nil { 1612 return err 1613 } 1614 1615 stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) 1616 if !ok { 1617 return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) 1618 } 1619 1620 staker, err := NewPendingStaker(txID, stakerTx) 1621 if err != nil { 1622 return err 1623 } 1624 1625 validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) 1626 if validator.delegators == nil { 1627 validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) 1628 } 1629 validator.delegators.ReplaceOrInsert(staker) 1630 1631 s.pendingStakers.stakers.ReplaceOrInsert(staker) 1632 } 1633 } 1634 1635 return errors.Join( 1636 validatorIt.Error(), 1637 subnetValidatorIt.Error(), 1638 delegatorIt.Error(), 1639 subnetDelegatorIt.Error(), 1640 ) 1641 } 1642 1643 // Invariant: initValidatorSets requires loadCurrentValidators to have already 1644 // been called. 1645 func (s *state) initValidatorSets() error { 1646 for subnetID, validators := range s.currentStakers.validators { 1647 if s.validators.Count(subnetID) != 0 { 1648 // Enforce the invariant that the validator set is empty here. 1649 return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) 1650 } 1651 1652 for nodeID, validator := range validators { 1653 validatorStaker := validator.validator 1654 if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { 1655 return err 1656 } 1657 1658 delegatorIterator := iterator.FromTree(validator.delegators) 1659 for delegatorIterator.Next() { 1660 delegatorStaker := delegatorIterator.Value() 1661 if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { 1662 delegatorIterator.Release() 1663 return err 1664 } 1665 } 1666 delegatorIterator.Release() 1667 } 1668 } 1669 1670 s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) 1671 totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) 1672 if err != nil { 1673 return fmt.Errorf("failed to get total weight of primary network validators: %w", err) 1674 } 1675 s.metrics.SetTotalStake(totalWeight) 1676 return nil 1677 } 1678 1679 func (s *state) write(updateValidators bool, height uint64) error { 1680 codecVersion := CodecVersion1 1681 if !s.upgrades.IsDurangoActivated(s.GetTimestamp()) { 1682 codecVersion = CodecVersion0 1683 } 1684 1685 return errors.Join( 1686 s.writeBlocks(), 1687 s.writeCurrentStakers(updateValidators, height, codecVersion), 1688 s.writePendingStakers(), 1689 s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers 1690 s.writeTXs(), 1691 s.writeRewardUTXOs(), 1692 s.writeUTXOs(), 1693 s.writeSubnets(), 1694 s.writeSubnetOwners(), 1695 s.writeSubnetManagers(), 1696 s.writeTransformedSubnets(), 1697 s.writeSubnetSupplies(), 1698 s.writeChains(), 1699 s.writeMetadata(), 1700 ) 1701 } 1702 1703 func (s *state) Close() error { 1704 return errors.Join( 1705 s.pendingSubnetValidatorBaseDB.Close(), 1706 s.pendingSubnetDelegatorBaseDB.Close(), 1707 s.pendingDelegatorBaseDB.Close(), 1708 s.pendingValidatorBaseDB.Close(), 1709 s.pendingValidatorsDB.Close(), 1710 s.currentSubnetValidatorBaseDB.Close(), 1711 s.currentSubnetDelegatorBaseDB.Close(), 1712 s.currentDelegatorBaseDB.Close(), 1713 s.currentValidatorBaseDB.Close(), 1714 s.currentValidatorsDB.Close(), 1715 s.validatorsDB.Close(), 1716 s.txDB.Close(), 1717 s.rewardUTXODB.Close(), 1718 s.utxoDB.Close(), 1719 s.subnetBaseDB.Close(), 1720 s.transformedSubnetDB.Close(), 1721 s.supplyDB.Close(), 1722 s.chainDB.Close(), 1723 s.singletonDB.Close(), 1724 s.blockDB.Close(), 1725 s.blockIDDB.Close(), 1726 ) 1727 } 1728 1729 func (s *state) sync(genesis []byte) error { 1730 wasInitialized, err := isInitialized(s.singletonDB) 1731 if err != nil { 1732 return fmt.Errorf( 1733 "failed to check if the database is initialized: %w", 1734 err, 1735 ) 1736 } 1737 1738 // If the database wasn't previously initialized, create the platform chain 1739 // anew using the provided genesis state. 1740 if !wasInitialized { 1741 if err := s.init(genesis); err != nil { 1742 return fmt.Errorf( 1743 "failed to initialize the database: %w", 1744 err, 1745 ) 1746 } 1747 } 1748 1749 if err := s.load(); err != nil { 1750 return fmt.Errorf( 1751 "failed to load the database state: %w", 1752 err, 1753 ) 1754 } 1755 return nil 1756 } 1757 1758 func (s *state) init(genesisBytes []byte) error { 1759 // Create the genesis block and save it as being accepted (We don't do 1760 // genesisBlock.Accept() because then it'd look for genesisBlock's 1761 // non-existent parent) 1762 genesisID := hashing.ComputeHash256Array(genesisBytes) 1763 genesisBlock, err := block.NewApricotCommitBlock(genesisID, 0 /*height*/) 1764 if err != nil { 1765 return err 1766 } 1767 1768 genesis, err := genesis.Parse(genesisBytes) 1769 if err != nil { 1770 return err 1771 } 1772 if err := s.syncGenesis(genesisBlock, genesis); err != nil { 1773 return err 1774 } 1775 1776 if err := markInitialized(s.singletonDB); err != nil { 1777 return err 1778 } 1779 1780 return s.Commit() 1781 } 1782 1783 func (s *state) AddStatelessBlock(block block.Block) { 1784 blkID := block.ID() 1785 s.addedBlockIDs[block.Height()] = blkID 1786 s.addedBlocks[blkID] = block 1787 } 1788 1789 func (s *state) SetHeight(height uint64) { 1790 if s.indexedHeights == nil { 1791 // If indexedHeights hasn't been created yet, then we are newly tracking 1792 // the range. This means we should initialize the LowerBound to the 1793 // current height. 1794 s.indexedHeights = &heightRange{ 1795 LowerBound: height, 1796 } 1797 } 1798 1799 s.indexedHeights.UpperBound = height 1800 s.currentHeight = height 1801 } 1802 1803 func (s *state) Commit() error { 1804 defer s.Abort() 1805 batch, err := s.CommitBatch() 1806 if err != nil { 1807 return err 1808 } 1809 return batch.Write() 1810 } 1811 1812 func (s *state) Abort() { 1813 s.baseDB.Abort() 1814 } 1815 1816 func (s *state) Checksum() ids.ID { 1817 return s.utxoState.Checksum() 1818 } 1819 1820 func (s *state) CommitBatch() (database.Batch, error) { 1821 // updateValidators is set to true here so that the validator manager is 1822 // kept up to date with the last accepted state. 1823 if err := s.write(true /*=updateValidators*/, s.currentHeight); err != nil { 1824 return nil, err 1825 } 1826 return s.baseDB.CommitBatch() 1827 } 1828 1829 func (s *state) writeBlocks() error { 1830 for blkID, blk := range s.addedBlocks { 1831 blkID := blkID 1832 blkBytes := blk.Bytes() 1833 blkHeight := blk.Height() 1834 heightKey := database.PackUInt64(blkHeight) 1835 1836 delete(s.addedBlockIDs, blkHeight) 1837 s.blockIDCache.Put(blkHeight, blkID) 1838 if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil { 1839 return fmt.Errorf("failed to add blockID: %w", err) 1840 } 1841 1842 delete(s.addedBlocks, blkID) 1843 // Note: Evict is used rather than Put here because blk may end up 1844 // referencing additional data (because of shared byte slices) that 1845 // would not be properly accounted for in the cache sizing. 1846 s.blockCache.Evict(blkID) 1847 if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { 1848 return fmt.Errorf("failed to write block %s: %w", blkID, err) 1849 } 1850 } 1851 return nil 1852 } 1853 1854 func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { 1855 if blk, exists := s.addedBlocks[blockID]; exists { 1856 return blk, nil 1857 } 1858 if blk, cached := s.blockCache.Get(blockID); cached { 1859 if blk == nil { 1860 return nil, database.ErrNotFound 1861 } 1862 1863 return blk, nil 1864 } 1865 1866 blkBytes, err := s.blockDB.Get(blockID[:]) 1867 if err == database.ErrNotFound { 1868 s.blockCache.Put(blockID, nil) 1869 return nil, database.ErrNotFound 1870 } 1871 if err != nil { 1872 return nil, err 1873 } 1874 1875 blk, _, err := parseStoredBlock(blkBytes) 1876 if err != nil { 1877 return nil, err 1878 } 1879 1880 s.blockCache.Put(blockID, blk) 1881 return blk, nil 1882 } 1883 1884 func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { 1885 if blkID, exists := s.addedBlockIDs[height]; exists { 1886 return blkID, nil 1887 } 1888 if blkID, cached := s.blockIDCache.Get(height); cached { 1889 if blkID == ids.Empty { 1890 return ids.Empty, database.ErrNotFound 1891 } 1892 1893 return blkID, nil 1894 } 1895 1896 heightKey := database.PackUInt64(height) 1897 1898 blkID, err := database.GetID(s.blockIDDB, heightKey) 1899 if err == database.ErrNotFound { 1900 s.blockIDCache.Put(height, ids.Empty) 1901 return ids.Empty, database.ErrNotFound 1902 } 1903 if err != nil { 1904 return ids.Empty, err 1905 } 1906 1907 s.blockIDCache.Put(height, blkID) 1908 return blkID, nil 1909 } 1910 1911 func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { 1912 for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { 1913 delete(s.currentStakers.validatorDiffs, subnetID) 1914 1915 // Select db to write to 1916 validatorDB := s.currentSubnetValidatorList 1917 delegatorDB := s.currentSubnetDelegatorList 1918 if subnetID == constants.PrimaryNetworkID { 1919 validatorDB = s.currentValidatorList 1920 delegatorDB = s.currentDelegatorList 1921 } 1922 1923 // Record the change in weight and/or public key for each validator. 1924 for nodeID, validatorDiff := range validatorDiffs { 1925 // Copy [nodeID] so it doesn't get overwritten next iteration. 1926 nodeID := nodeID 1927 1928 weightDiff := &ValidatorWeightDiff{ 1929 Decrease: validatorDiff.validatorStatus == deleted, 1930 } 1931 switch validatorDiff.validatorStatus { 1932 case added: 1933 staker := validatorDiff.validator 1934 weightDiff.Amount = staker.Weight 1935 1936 // Invariant: Only the Primary Network contains non-nil public 1937 // keys. 1938 if staker.PublicKey != nil { 1939 // Record that the public key for the validator is being 1940 // added. This means the prior value for the public key was 1941 // nil. 1942 err := s.validatorPublicKeyDiffsDB.Put( 1943 marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), 1944 nil, 1945 ) 1946 if err != nil { 1947 return err 1948 } 1949 } 1950 1951 // The validator is being added. 1952 // 1953 // Invariant: It's impossible for a delegator to have been 1954 // rewarded in the same block that the validator was added. 1955 startTime := uint64(staker.StartTime.Unix()) 1956 metadata := &validatorMetadata{ 1957 txID: staker.TxID, 1958 lastUpdated: staker.StartTime, 1959 1960 UpDuration: 0, 1961 LastUpdated: startTime, 1962 StakerStartTime: startTime, 1963 PotentialReward: staker.PotentialReward, 1964 PotentialDelegateeReward: 0, 1965 } 1966 1967 metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) 1968 if err != nil { 1969 return fmt.Errorf("failed to serialize current validator: %w", err) 1970 } 1971 1972 if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { 1973 return fmt.Errorf("failed to write current validator to list: %w", err) 1974 } 1975 1976 s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) 1977 case deleted: 1978 staker := validatorDiff.validator 1979 weightDiff.Amount = staker.Weight 1980 1981 // Invariant: Only the Primary Network contains non-nil public 1982 // keys. 1983 if staker.PublicKey != nil { 1984 // Record that the public key for the validator is being 1985 // removed. This means we must record the prior value of the 1986 // public key. 1987 // 1988 // Note: We store the uncompressed public key here as it is 1989 // significantly more efficient to parse when applying 1990 // diffs. 1991 err := s.validatorPublicKeyDiffsDB.Put( 1992 marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), 1993 bls.PublicKeyToUncompressedBytes(staker.PublicKey), 1994 ) 1995 if err != nil { 1996 return err 1997 } 1998 } 1999 2000 if err := validatorDB.Delete(staker.TxID[:]); err != nil { 2001 return fmt.Errorf("failed to delete current staker: %w", err) 2002 } 2003 2004 s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) 2005 } 2006 2007 err := writeCurrentDelegatorDiff( 2008 delegatorDB, 2009 weightDiff, 2010 validatorDiff, 2011 codecVersion, 2012 ) 2013 if err != nil { 2014 return err 2015 } 2016 2017 if weightDiff.Amount == 0 { 2018 // No weight change to record; go to next validator. 2019 continue 2020 } 2021 2022 err = s.validatorWeightDiffsDB.Put( 2023 marshalDiffKey(subnetID, height, nodeID), 2024 marshalWeightDiff(weightDiff), 2025 ) 2026 if err != nil { 2027 return err 2028 } 2029 2030 // TODO: Move the validator set management out of the state package 2031 if !updateValidators { 2032 continue 2033 } 2034 2035 if weightDiff.Decrease { 2036 err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) 2037 } else { 2038 if validatorDiff.validatorStatus == added { 2039 staker := validatorDiff.validator 2040 err = s.validators.AddStaker( 2041 subnetID, 2042 nodeID, 2043 staker.PublicKey, 2044 staker.TxID, 2045 weightDiff.Amount, 2046 ) 2047 } else { 2048 err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) 2049 } 2050 } 2051 if err != nil { 2052 return fmt.Errorf("failed to update validator weight: %w", err) 2053 } 2054 } 2055 } 2056 2057 // TODO: Move validator set management out of the state package 2058 // 2059 // Attempt to update the stake metrics 2060 if !updateValidators { 2061 return nil 2062 } 2063 2064 totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) 2065 if err != nil { 2066 return fmt.Errorf("failed to get total weight of primary network: %w", err) 2067 } 2068 2069 s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) 2070 s.metrics.SetTotalStake(totalWeight) 2071 return nil 2072 } 2073 2074 func writeCurrentDelegatorDiff( 2075 currentDelegatorList linkeddb.LinkedDB, 2076 weightDiff *ValidatorWeightDiff, 2077 validatorDiff *diffValidator, 2078 codecVersion uint16, 2079 ) error { 2080 addedDelegatorIterator := iterator.FromTree(validatorDiff.addedDelegators) 2081 defer addedDelegatorIterator.Release() 2082 for addedDelegatorIterator.Next() { 2083 staker := addedDelegatorIterator.Value() 2084 2085 if err := weightDiff.Add(false, staker.Weight); err != nil { 2086 return fmt.Errorf("failed to increase node weight diff: %w", err) 2087 } 2088 2089 metadata := &delegatorMetadata{ 2090 txID: staker.TxID, 2091 PotentialReward: staker.PotentialReward, 2092 StakerStartTime: uint64(staker.StartTime.Unix()), 2093 } 2094 if err := writeDelegatorMetadata(currentDelegatorList, metadata, codecVersion); err != nil { 2095 return fmt.Errorf("failed to write current delegator to list: %w", err) 2096 } 2097 } 2098 2099 for _, staker := range validatorDiff.deletedDelegators { 2100 if err := weightDiff.Add(true, staker.Weight); err != nil { 2101 return fmt.Errorf("failed to decrease node weight diff: %w", err) 2102 } 2103 2104 if err := currentDelegatorList.Delete(staker.TxID[:]); err != nil { 2105 return fmt.Errorf("failed to delete current staker: %w", err) 2106 } 2107 } 2108 return nil 2109 } 2110 2111 func (s *state) writePendingStakers() error { 2112 for subnetID, subnetValidatorDiffs := range s.pendingStakers.validatorDiffs { 2113 delete(s.pendingStakers.validatorDiffs, subnetID) 2114 2115 validatorDB := s.pendingSubnetValidatorList 2116 delegatorDB := s.pendingSubnetDelegatorList 2117 if subnetID == constants.PrimaryNetworkID { 2118 validatorDB = s.pendingValidatorList 2119 delegatorDB = s.pendingDelegatorList 2120 } 2121 2122 for _, validatorDiff := range subnetValidatorDiffs { 2123 err := writePendingDiff( 2124 validatorDB, 2125 delegatorDB, 2126 validatorDiff, 2127 ) 2128 if err != nil { 2129 return err 2130 } 2131 } 2132 } 2133 return nil 2134 } 2135 2136 func writePendingDiff( 2137 pendingValidatorList linkeddb.LinkedDB, 2138 pendingDelegatorList linkeddb.LinkedDB, 2139 validatorDiff *diffValidator, 2140 ) error { 2141 switch validatorDiff.validatorStatus { 2142 case added: 2143 err := pendingValidatorList.Put(validatorDiff.validator.TxID[:], nil) 2144 if err != nil { 2145 return fmt.Errorf("failed to add pending validator: %w", err) 2146 } 2147 case deleted: 2148 err := pendingValidatorList.Delete(validatorDiff.validator.TxID[:]) 2149 if err != nil { 2150 return fmt.Errorf("failed to delete pending validator: %w", err) 2151 } 2152 } 2153 2154 addedDelegatorIterator := iterator.FromTree(validatorDiff.addedDelegators) 2155 defer addedDelegatorIterator.Release() 2156 for addedDelegatorIterator.Next() { 2157 staker := addedDelegatorIterator.Value() 2158 2159 if err := pendingDelegatorList.Put(staker.TxID[:], nil); err != nil { 2160 return fmt.Errorf("failed to write pending delegator to list: %w", err) 2161 } 2162 } 2163 2164 for _, staker := range validatorDiff.deletedDelegators { 2165 if err := pendingDelegatorList.Delete(staker.TxID[:]); err != nil { 2166 return fmt.Errorf("failed to delete pending delegator: %w", err) 2167 } 2168 } 2169 return nil 2170 } 2171 2172 func (s *state) writeTXs() error { 2173 for txID, txStatus := range s.addedTxs { 2174 txID := txID 2175 2176 stx := txBytesAndStatus{ 2177 Tx: txStatus.tx.Bytes(), 2178 Status: txStatus.status, 2179 } 2180 2181 // Note that we're serializing a [txBytesAndStatus] here, not a 2182 // *txs.Tx, so we don't use [txs.Codec]. 2183 txBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, &stx) 2184 if err != nil { 2185 return fmt.Errorf("failed to serialize tx: %w", err) 2186 } 2187 2188 delete(s.addedTxs, txID) 2189 // Note: Evict is used rather than Put here because stx may end up 2190 // referencing additional data (because of shared byte slices) that 2191 // would not be properly accounted for in the cache sizing. 2192 s.txCache.Evict(txID) 2193 if err := s.txDB.Put(txID[:], txBytes); err != nil { 2194 return fmt.Errorf("failed to add tx: %w", err) 2195 } 2196 } 2197 return nil 2198 } 2199 2200 func (s *state) writeRewardUTXOs() error { 2201 for txID, utxos := range s.addedRewardUTXOs { 2202 delete(s.addedRewardUTXOs, txID) 2203 s.rewardUTXOsCache.Put(txID, utxos) 2204 rawTxDB := prefixdb.New(txID[:], s.rewardUTXODB) 2205 txDB := linkeddb.NewDefault(rawTxDB) 2206 2207 for _, utxo := range utxos { 2208 utxoBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, utxo) 2209 if err != nil { 2210 return fmt.Errorf("failed to serialize reward UTXO: %w", err) 2211 } 2212 utxoID := utxo.InputID() 2213 if err := txDB.Put(utxoID[:], utxoBytes); err != nil { 2214 return fmt.Errorf("failed to add reward UTXO: %w", err) 2215 } 2216 } 2217 } 2218 return nil 2219 } 2220 2221 func (s *state) writeUTXOs() error { 2222 for utxoID, utxo := range s.modifiedUTXOs { 2223 delete(s.modifiedUTXOs, utxoID) 2224 2225 if utxo == nil { 2226 if err := s.utxoState.DeleteUTXO(utxoID); err != nil { 2227 return fmt.Errorf("failed to delete UTXO: %w", err) 2228 } 2229 continue 2230 } 2231 if err := s.utxoState.PutUTXO(utxo); err != nil { 2232 return fmt.Errorf("failed to add UTXO: %w", err) 2233 } 2234 } 2235 return nil 2236 } 2237 2238 func (s *state) writeSubnets() error { 2239 for _, subnetID := range s.addedSubnetIDs { 2240 if err := s.subnetDB.Put(subnetID[:], nil); err != nil { 2241 return fmt.Errorf("failed to write subnet: %w", err) 2242 } 2243 } 2244 s.addedSubnetIDs = nil 2245 return nil 2246 } 2247 2248 func (s *state) writeSubnetOwners() error { 2249 for subnetID, owner := range s.subnetOwners { 2250 subnetID := subnetID 2251 owner := owner 2252 delete(s.subnetOwners, subnetID) 2253 2254 ownerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &owner) 2255 if err != nil { 2256 return fmt.Errorf("failed to marshal subnet owner: %w", err) 2257 } 2258 2259 s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ 2260 owner: owner, 2261 size: len(ownerBytes), 2262 }) 2263 2264 if err := s.subnetOwnerDB.Put(subnetID[:], ownerBytes); err != nil { 2265 return fmt.Errorf("failed to write subnet owner: %w", err) 2266 } 2267 } 2268 return nil 2269 } 2270 2271 func (s *state) writeSubnetManagers() error { 2272 for subnetID, manager := range s.subnetManagers { 2273 subnetID := subnetID 2274 manager := manager 2275 delete(s.subnetManagers, subnetID) 2276 2277 managerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &manager) 2278 if err != nil { 2279 return fmt.Errorf("failed to marshal subnet manager: %w", err) 2280 } 2281 2282 s.subnetManagerCache.Put(subnetID, manager) 2283 2284 if err := s.subnetManagerDB.Put(subnetID[:], managerBytes); err != nil { 2285 return fmt.Errorf("failed to write subnet manager: %w", err) 2286 } 2287 } 2288 return nil 2289 } 2290 2291 func (s *state) writeTransformedSubnets() error { 2292 for subnetID, tx := range s.transformedSubnets { 2293 txID := tx.ID() 2294 2295 delete(s.transformedSubnets, subnetID) 2296 // Note: Evict is used rather than Put here because tx may end up 2297 // referencing additional data (because of shared byte slices) that 2298 // would not be properly accounted for in the cache sizing. 2299 s.transformedSubnetCache.Evict(subnetID) 2300 if err := database.PutID(s.transformedSubnetDB, subnetID[:], txID); err != nil { 2301 return fmt.Errorf("failed to write transformed subnet: %w", err) 2302 } 2303 } 2304 return nil 2305 } 2306 2307 func (s *state) writeSubnetSupplies() error { 2308 for subnetID, supply := range s.modifiedSupplies { 2309 supply := supply 2310 delete(s.modifiedSupplies, subnetID) 2311 s.supplyCache.Put(subnetID, &supply) 2312 if err := database.PutUInt64(s.supplyDB, subnetID[:], supply); err != nil { 2313 return fmt.Errorf("failed to write subnet supply: %w", err) 2314 } 2315 } 2316 return nil 2317 } 2318 2319 func (s *state) writeChains() error { 2320 for subnetID, chains := range s.addedChains { 2321 for _, chain := range chains { 2322 chainDB := s.getChainDB(subnetID) 2323 2324 chainID := chain.ID() 2325 if err := chainDB.Put(chainID[:], nil); err != nil { 2326 return fmt.Errorf("failed to write chain: %w", err) 2327 } 2328 } 2329 delete(s.addedChains, subnetID) 2330 } 2331 return nil 2332 } 2333 2334 func (s *state) writeMetadata() error { 2335 if !s.persistedTimestamp.Equal(s.timestamp) { 2336 if err := database.PutTimestamp(s.singletonDB, TimestampKey, s.timestamp); err != nil { 2337 return fmt.Errorf("failed to write timestamp: %w", err) 2338 } 2339 s.persistedTimestamp = s.timestamp 2340 } 2341 if s.feeState != s.persistedFeeState { 2342 if err := putFeeState(s.singletonDB, s.feeState); err != nil { 2343 return fmt.Errorf("failed to write fee state: %w", err) 2344 } 2345 s.persistedFeeState = s.feeState 2346 } 2347 if s.persistedCurrentSupply != s.currentSupply { 2348 if err := database.PutUInt64(s.singletonDB, CurrentSupplyKey, s.currentSupply); err != nil { 2349 return fmt.Errorf("failed to write current supply: %w", err) 2350 } 2351 s.persistedCurrentSupply = s.currentSupply 2352 } 2353 if s.persistedLastAccepted != s.lastAccepted { 2354 if err := database.PutID(s.singletonDB, LastAcceptedKey, s.lastAccepted); err != nil { 2355 return fmt.Errorf("failed to write last accepted: %w", err) 2356 } 2357 s.persistedLastAccepted = s.lastAccepted 2358 } 2359 if s.indexedHeights != nil { 2360 indexedHeightsBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, s.indexedHeights) 2361 if err != nil { 2362 return err 2363 } 2364 if err := s.singletonDB.Put(HeightsIndexedKey, indexedHeightsBytes); err != nil { 2365 return fmt.Errorf("failed to write indexed range: %w", err) 2366 } 2367 } 2368 return nil 2369 } 2370 2371 // Returns the block and whether it is a [stateBlk]. 2372 // Invariant: blkBytes is safe to parse with blocks.GenesisCodec 2373 // 2374 // TODO: Remove after v1.12.x is activated 2375 func parseStoredBlock(blkBytes []byte) (block.Block, bool, error) { 2376 // Attempt to parse as blocks.Block 2377 blk, err := block.Parse(block.GenesisCodec, blkBytes) 2378 if err == nil { 2379 return blk, false, nil 2380 } 2381 2382 // Fallback to [stateBlk] 2383 blkState := stateBlk{} 2384 if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil { 2385 return nil, false, err 2386 } 2387 2388 blk, err = block.Parse(block.GenesisCodec, blkState.Bytes) 2389 return blk, true, err 2390 } 2391 2392 func (s *state) ReindexBlocks(lock sync.Locker, log logging.Logger) error { 2393 has, err := s.singletonDB.Has(BlocksReindexedKey) 2394 if err != nil { 2395 return err 2396 } 2397 if has { 2398 log.Info("blocks already reindexed") 2399 return nil 2400 } 2401 2402 // It is possible that new blocks are added after grabbing this iterator. 2403 // New blocks are guaranteed to be persisted in the new format, so we don't 2404 // need to check them. 2405 blockIterator := s.blockDB.NewIterator() 2406 // Releasing is done using a closure to ensure that updating blockIterator 2407 // will result in having the most recent iterator released when executing 2408 // the deferred function. 2409 defer func() { 2410 blockIterator.Release() 2411 }() 2412 2413 log.Info("starting block reindexing") 2414 2415 var ( 2416 startTime = time.Now() 2417 lastCommit = startTime 2418 nextUpdate = startTime.Add(indexLogFrequency) 2419 numIndicesChecked = 0 2420 numIndicesUpdated = 0 2421 ) 2422 2423 for blockIterator.Next() { 2424 valueBytes := blockIterator.Value() 2425 blk, isStateBlk, err := parseStoredBlock(valueBytes) 2426 if err != nil { 2427 return fmt.Errorf("failed to parse block: %w", err) 2428 } 2429 2430 blkID := blk.ID() 2431 2432 // This block was previously stored using the legacy format, update the 2433 // index to remove the usage of stateBlk. 2434 if isStateBlk { 2435 blkBytes := blk.Bytes() 2436 if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { 2437 return fmt.Errorf("failed to write block: %w", err) 2438 } 2439 2440 numIndicesUpdated++ 2441 } 2442 2443 numIndicesChecked++ 2444 2445 now := time.Now() 2446 if now.After(nextUpdate) { 2447 nextUpdate = now.Add(indexLogFrequency) 2448 2449 progress := timer.ProgressFromHash(blkID[:]) 2450 eta := timer.EstimateETA( 2451 startTime, 2452 progress, 2453 math.MaxUint64, 2454 ) 2455 2456 log.Info("reindexing blocks", 2457 zap.Int("numIndicesUpdated", numIndicesUpdated), 2458 zap.Int("numIndicesChecked", numIndicesChecked), 2459 zap.Duration("eta", eta), 2460 ) 2461 } 2462 2463 if numIndicesChecked%indexIterationLimit == 0 { 2464 // We must hold the lock during committing to make sure we don't 2465 // attempt to commit to disk while a block is concurrently being 2466 // accepted. 2467 lock.Lock() 2468 err := errors.Join( 2469 s.Commit(), 2470 blockIterator.Error(), 2471 ) 2472 lock.Unlock() 2473 if err != nil { 2474 return err 2475 } 2476 2477 // We release the iterator here to allow the underlying database to 2478 // clean up deleted state. 2479 blockIterator.Release() 2480 2481 // We take the minimum here because it's possible that the node is 2482 // currently bootstrapping. This would mean that grabbing the lock 2483 // could take an extremely long period of time; which we should not 2484 // delay processing for. 2485 indexDuration := now.Sub(lastCommit) 2486 sleepDuration := min( 2487 indexIterationSleepMultiplier*indexDuration, 2488 indexIterationSleepCap, 2489 ) 2490 time.Sleep(sleepDuration) 2491 2492 // Make sure not to include the sleep duration into the next index 2493 // duration. 2494 lastCommit = time.Now() 2495 2496 blockIterator = s.blockDB.NewIteratorWithStart(blkID[:]) 2497 } 2498 } 2499 2500 // Ensure we fully iterated over all blocks before writing that indexing has 2501 // finished. 2502 // 2503 // Note: This is needed because a transient read error could cause the 2504 // iterator to stop early. 2505 if err := blockIterator.Error(); err != nil { 2506 return fmt.Errorf("failed to iterate over historical blocks: %w", err) 2507 } 2508 2509 if err := s.singletonDB.Put(BlocksReindexedKey, nil); err != nil { 2510 return fmt.Errorf("failed to put marked blocks as reindexed: %w", err) 2511 } 2512 2513 // We must hold the lock during committing to make sure we don't attempt to 2514 // commit to disk while a block is concurrently being accepted. 2515 lock.Lock() 2516 defer lock.Unlock() 2517 2518 log.Info("finished block reindexing", 2519 zap.Int("numIndicesUpdated", numIndicesUpdated), 2520 zap.Int("numIndicesChecked", numIndicesChecked), 2521 zap.Duration("duration", time.Since(startTime)), 2522 ) 2523 2524 return s.Commit() 2525 } 2526 2527 func markInitialized(db database.KeyValueWriter) error { 2528 return db.Put(InitializedKey, nil) 2529 } 2530 2531 func isInitialized(db database.KeyValueReader) (bool, error) { 2532 return db.Has(InitializedKey) 2533 } 2534 2535 func putFeeState(db database.KeyValueWriter, feeState gas.State) error { 2536 feeStateBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, feeState) 2537 if err != nil { 2538 return err 2539 } 2540 return db.Put(FeeStateKey, feeStateBytes) 2541 } 2542 2543 func getFeeState(db database.KeyValueReader) (gas.State, error) { 2544 feeStateBytes, err := db.Get(FeeStateKey) 2545 if err == database.ErrNotFound { 2546 return gas.State{}, nil 2547 } 2548 if err != nil { 2549 return gas.State{}, err 2550 } 2551 2552 var feeState gas.State 2553 if _, err := block.GenesisCodec.Unmarshal(feeStateBytes, &feeState); err != nil { 2554 return gas.State{}, err 2555 } 2556 return feeState, nil 2557 }