github.com/dim4egster/coreth@v0.10.2/plugin/evm/syncervm_client.go

// (c) 2021-2022, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package evm

import (
	"context"
	"fmt"

	"github.com/dim4egster/qmallgo/database"
	"github.com/dim4egster/qmallgo/database/versiondb"
	"github.com/dim4egster/qmallgo/ids"
	"github.com/dim4egster/qmallgo/snow/choices"
	commonEng "github.com/dim4egster/qmallgo/snow/engine/common"
	"github.com/dim4egster/qmallgo/snow/engine/snowman/block"
	"github.com/dim4egster/qmallgo/vms/components/chain"
	"github.com/dim4egster/coreth/core/rawdb"
	"github.com/dim4egster/coreth/core/state/snapshot"
	"github.com/dim4egster/coreth/eth"
	"github.com/dim4egster/coreth/ethdb"
	"github.com/dim4egster/coreth/params"
	"github.com/dim4egster/coreth/plugin/evm/message"
	syncclient "github.com/dim4egster/coreth/sync/client"
	"github.com/dim4egster/coreth/sync/statesync"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

const (
	// State sync fetches [parentsToGet] parents of the block it syncs to.
	// The last 256 block hashes are necessary to support the BLOCKHASH opcode.
	parentsToGet = 256
)

var stateSyncSummaryKey = []byte("stateSyncSummary")

// stateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient
type stateSyncClientConfig struct {
	enabled    bool
	skipResume bool
	// Specifies the number of blocks behind the latest state summary that the chain must be
	// in order to prefer performing state sync over falling back to the normal bootstrapping
	// algorithm.
	stateSyncMinBlocks uint64

	lastAcceptedHeight uint64

	chain           *eth.Ethereum
	state           *chain.State
	chaindb         ethdb.Database
	metadataDB      database.Database
	acceptedBlockDB database.Database
	db              *versiondb.Database
	atomicBackend   AtomicBackend

	client syncclient.Client

	toEngine chan<- commonEng.Message
}

type stateSyncerClient struct {
	*stateSyncClientConfig

	resumableSummary message.SyncSummary

	cancel context.CancelFunc

	// State Sync results
	syncSummary  message.SyncSummary
	stateSyncErr error
}

// NewStateSyncClient returns a StateSyncClient backed by [config].
func NewStateSyncClient(config *stateSyncClientConfig) StateSyncClient {
	return &stateSyncerClient{
		stateSyncClientConfig: config,
	}
}

// StateSyncClient is the client-side interface the VM uses to drive state sync.
type StateSyncClient interface {
	// methods that implement the client side of [block.StateSyncableVM]
	StateSyncEnabled() (bool, error)
	GetOngoingSyncStateSummary() (block.StateSummary, error)
	ParseStateSummary(summaryBytes []byte) (block.StateSummary, error)

	// additional methods required by the evm package
	StateSyncClearOngoingSummary() error
	Shutdown() error
	Error() error
}

// Syncer represents a step in state sync,
// along with Start/Done methods to control
// and monitor progress.
// Done returns a channel that reports an error if one was encountered.
type Syncer interface {
	Start(ctx context.Context) error
	Done() <-chan error
}

// StateSyncEnabled returns [client.enabled], which is set in the chain's config file.
func (client *stateSyncerClient) StateSyncEnabled() (bool, error) { return client.enabled, nil }

// GetOngoingSyncStateSummary returns a state summary that was previously started
// and not finished, and sets [resumableSummary] if one was found.
// Returns [database.ErrNotFound] if no ongoing summary is found or if [client.skipResume] is true.
func (client *stateSyncerClient) GetOngoingSyncStateSummary() (block.StateSummary, error) {
	if client.skipResume {
		return nil, database.ErrNotFound
	}

	summaryBytes, err := client.metadataDB.Get(stateSyncSummaryKey)
	if err != nil {
		return nil, err // includes the [database.ErrNotFound] case
	}

	summary, err := message.NewSyncSummaryFromBytes(summaryBytes, client.acceptSyncSummary)
	if err != nil {
		return nil, fmt.Errorf("failed to parse saved state sync summary to SyncSummary: %w", err)
	}
	client.resumableSummary = summary
	return summary, nil
}

// StateSyncClearOngoingSummary clears any marker of an ongoing state sync summary
func (client *stateSyncerClient) StateSyncClearOngoingSummary() error {
	if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil {
		return fmt.Errorf("failed to clear ongoing summary: %w", err)
	}
	if err := client.db.Commit(); err != nil {
		return fmt.Errorf("failed to commit db while clearing ongoing summary: %w", err)
	}

	return nil
}

// ParseStateSummary parses [summaryBytes] to a [block.StateSummary]
func (client *stateSyncerClient) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) {
	return message.NewSyncSummaryFromBytes(summaryBytes, client.acceptSyncSummary)
}

// stateSync performs the state sync for the EVM state and the atomic state
// to [client.syncSummary], blocking until both complete. Returns an error if one occurred.
func (client *stateSyncerClient) stateSync() error {
	ctx, cancel := context.WithCancel(context.Background())
	client.cancel = cancel
	defer cancel()

	if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, parentsToGet); err != nil {
		return err
	}

	// Sync the EVM trie and then the atomic trie. These steps could be done
	// in parallel or in the opposite order. Keeping them serial for simplicity for now.
	if err := client.syncStateTrie(ctx); err != nil {
		return err
	}

	return client.syncAtomicTrie(ctx)
}

// acceptSyncSummary returns true if sync will be performed and launches the state sync process
// in a goroutine.
func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncSummary) (bool, error) {
	isResume := proposedSummary.BlockHash == client.resumableSummary.BlockHash
	if !isResume {
		// Skip syncing if the blockchain is not significantly ahead of local state,
		// since bootstrapping would be faster.
		// (Also ensures we don't sync to a height prior to local state.)
		if client.lastAcceptedHeight+client.stateSyncMinBlocks > proposedSummary.Height() {
			log.Info(
				"last accepted too close to most recent syncable block, skipping state sync",
				"lastAccepted", client.lastAcceptedHeight,
				"syncableHeight", proposedSummary.Height(),
			)
			if err := client.StateSyncClearOngoingSummary(); err != nil {
				return false, fmt.Errorf("failed to clear ongoing summary after skipping state sync: %w", err)
			}
			// Initialize snapshots if we're skipping state sync, since it will not have been initialized on
			// startup.
			client.chain.BlockChain().InitializeSnapshots()
			return false, nil
		}

		// Wipe the snapshot completely if we are not resuming from an existing sync, so that we do not
		// use a corrupted snapshot.
		// Note: this assumes that when the node is started with state sync disabled, the in-progress state
		// sync marker will be wiped, so we do not accidentally resume progress from an incorrect version
		// of the snapshot. (if switching between versions that come before this change and back this could
		// lead to the snapshot not being cleaned up correctly)
		<-snapshot.WipeSnapshot(client.chaindb, true)
		// Reset the snapshot generator here so that when state sync completes, snapshots will not attempt to read an
		// invalid generator.
		// Note: this must be called after WipeSnapshot is called so that we do not invalidate a partially generated snapshot.
		snapshot.ResetSnapshotGeneration(client.chaindb)
	}
	client.syncSummary = proposedSummary

	// Update the current state sync summary key in the database
	// Note: this must be performed after WipeSnapshot finishes so that we do not start a state sync
	// session from a partially wiped snapshot.
	if err := client.metadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil {
		return false, fmt.Errorf("failed to write state sync summary key to disk: %w", err)
	}
	if err := client.db.Commit(); err != nil {
		return false, fmt.Errorf("failed to commit db: %w", err)
	}

	log.Info("Starting state sync", "summary", proposedSummary)
	go func() {
		if err := client.stateSync(); err != nil {
			client.stateSyncErr = err
		} else {
			client.stateSyncErr = client.finishSync()
		}
		// Notify the engine regardless of whether err == nil;
		// this error will be propagated to the engine when it calls
		// vm.SetState(snow.Bootstrapping).
		log.Info("stateSync completed, notifying engine", "err", client.stateSyncErr)
		client.toEngine <- commonEng.StateSyncDone
	}()
	return true, nil
}

// syncBlocks fetches (up to) [parentsToGet] blocks from peers
// using [client] and writes them to disk.
// The process begins with [fromHash] and fetches parents recursively;
// fetching starts from the first ancestor not found on disk.
func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common.Hash, fromHeight uint64, parentsToGet int) error {
	nextHash := fromHash
	nextHeight := fromHeight
	parentsPerRequest := uint16(32)

	// first, check for blocks already available on disk so we don't
	// request them from peers.
	for parentsToGet >= 0 {
		blk := rawdb.ReadBlock(client.chaindb, nextHash, nextHeight)
		if blk != nil {
			// block exists
			nextHash = blk.ParentHash()
			nextHeight--
			parentsToGet--
			continue
		}

		// block was not found
		break
	}

	// get any blocks we couldn't find on disk from peers and write
	// them to disk.
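	//
	// For a rough sense of scale (illustrative arithmetic only, assuming every
	// response returns the full [parentsPerRequest] blocks requested): with
	// parentsToGet = 256 and parentsPerRequest = 32, at most (256 + 31) / 32 = 8
	// GetBlocks round trips cover all missing ancestors; shorter responses simply
	// leave more iterations of the loop below.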
	batch := client.chaindb.NewBatch()
	for i := parentsToGet - 1; i >= 0 && (nextHash != common.Hash{}); {
		if err := ctx.Err(); err != nil {
			return err
		}
		blocks, err := client.client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest)
		if err != nil {
			log.Warn("could not get blocks from peer", "err", err, "nextHash", nextHash, "remaining", i+1)
			return err
		}
		for _, block := range blocks {
			rawdb.WriteBlock(batch, block)
			rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())

			i--
			nextHash = block.ParentHash()
			nextHeight--
		}
		log.Info("fetching blocks from peer", "remaining", i+1, "total", parentsToGet)
	}
	log.Info("fetched blocks from peer", "total", parentsToGet)
	return batch.Write()
}

// syncAtomicTrie syncs the atomic trie to [client.syncSummary.AtomicRoot],
// blocking until the syncer reports completion.
func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error {
	log.Info("atomic tx: sync starting", "root", client.syncSummary.AtomicRoot)
	atomicSyncer, err := client.atomicBackend.Syncer(client.client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber)
	if err != nil {
		return err
	}
	if err := atomicSyncer.Start(ctx); err != nil {
		return err
	}
	err = <-atomicSyncer.Done()
	log.Info("atomic tx: sync finished", "root", client.syncSummary.AtomicRoot, "err", err)
	return err
}

// syncStateTrie syncs the EVM state trie to [client.syncSummary.BlockRoot],
// blocking until the syncer reports completion.
func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error {
	log.Info("state sync: sync starting", "root", client.syncSummary.BlockRoot)
	evmSyncer, err := statesync.NewStateSyncer(&statesync.StateSyncerConfig{
		Client:                   client.client,
		Root:                     client.syncSummary.BlockRoot,
		BatchSize:                ethdb.IdealBatchSize,
		DB:                       client.chaindb,
		MaxOutstandingCodeHashes: statesync.DefaultMaxOutstandingCodeHashes,
		NumCodeFetchingWorkers:   statesync.DefaultNumCodeFetchingWorkers,
	})
	if err != nil {
		return err
	}
	if err := evmSyncer.Start(ctx); err != nil {
		return err
	}
	err = <-evmSyncer.Done()
	log.Info("state sync: sync finished", "root", client.syncSummary.BlockRoot, "err", err)
	return err
}

// Shutdown cancels any in-progress sync.
func (client *stateSyncerClient) Shutdown() error {
	if client.cancel != nil {
		client.cancel()
	}
	return nil
}

// finishSync is responsible for updating disk and memory pointers so the VM is prepared
// for bootstrapping. Executes any shared memory operations from the atomic trie to shared memory.
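// The body below proceeds roughly as follows: mark the synced block as accepted,
// add a bloom indexer checkpoint for the parent's section, reset the blockchain
// state to the synced block, persist the VM markers, update the in-memory last
// accepted block, and finally apply the atomic trie to shared memory.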
func (client *stateSyncerClient) finishSync() error {
	stateBlock, err := client.state.GetBlock(ids.ID(client.syncSummary.BlockHash))
	if err != nil {
		return fmt.Errorf("could not get block by hash from client state: %s", client.syncSummary.BlockHash)
	}

	wrapper, ok := stateBlock.(*chain.BlockWrapper)
	if !ok {
		return fmt.Errorf("could not convert block(%T) to *chain.BlockWrapper", stateBlock)
	}
	evmBlock, ok := wrapper.Block.(*Block)
	if !ok {
		return fmt.Errorf("could not convert block(%T) to evm.Block", wrapper.Block)
	}

	evmBlock.SetStatus(choices.Accepted)
	block := evmBlock.ethBlock

	if block.Hash() != client.syncSummary.BlockHash {
		return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.syncSummary.BlockHash)
	}
	if block.NumberU64() != client.syncSummary.BlockNumber {
		return fmt.Errorf("attempted to set last summary block to unexpected block number: (%d != %d)", block.NumberU64(), client.syncSummary.BlockNumber)
	}

	// BloomIndexer needs to know that some parts of the chain are not available
	// and cannot be indexed. This is done by calling [AddCheckpoint] here.
	// Since the indexer uses sections of size [params.BloomBitsBlocks] (= 4096),
	// each block is indexed in section number [blockNumber/params.BloomBitsBlocks].
	// To allow the indexer to start with the block we just synced to,
	// we create a checkpoint for its parent.
	// Note: this assumes the synced block height is divisible
	// by [params.BloomBitsBlocks].
	parentHeight := block.NumberU64() - 1
	parentHash := block.ParentHash()
	client.chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash)

	if err := client.chain.BlockChain().ResetState(block); err != nil {
		return err
	}

	if err := client.updateVMMarkers(); err != nil {
		return fmt.Errorf("error updating vm markers, height=%d, hash=%s, err=%w", block.NumberU64(), block.Hash(), err)
	}

	if err := client.state.SetLastAcceptedBlock(evmBlock); err != nil {
		return err
	}

	// The chain state is already restored, and from this point on
	// the block synced to is the accepted block. The last operation
	// is updating shared memory with the atomic trie.
	// ApplyToSharedMemory does this, and even if the VM is stopped
	// (gracefully or ungracefully), since MarkApplyToSharedMemoryCursor
	// is called, the VM will resume ApplyToSharedMemory on Initialize.
	return client.atomicBackend.ApplyToSharedMemory(block.NumberU64())
}

// updateVMMarkers updates the following markers in the VM's database
// and commits them atomically:
// - updates atomic trie so it will have necessary metadata for the last committed root
// - updates atomic trie so it will resume applying operations to shared memory on initialize
// - updates lastAcceptedKey
// - removes state sync progress markers
func (client *stateSyncerClient) updateVMMarkers() error {
	// Mark the previously last accepted block for the shared memory cursor, so that we will execute shared
	// memory operations from the previously last accepted block to [vm.syncSummary] when ApplyToSharedMemory
	// is called.
	if err := client.atomicBackend.MarkApplyToSharedMemoryCursor(client.lastAcceptedHeight); err != nil {
		return err
	}
	client.atomicBackend.SetLastAccepted(client.syncSummary.BlockHash)
	if err := client.acceptedBlockDB.Put(lastAcceptedKey, client.syncSummary.BlockHash[:]); err != nil {
		return err
	}
	if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil {
		return err
	}
	return client.db.Commit()
}

// Error returns a non-nil error if one occurred during the sync.
func (client *stateSyncerClient) Error() error { return client.stateSyncErr }
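
// A minimal sketch (illustrative only; the concrete engine wiring lives outside
// this file) of how the pieces above fit together from the VM's point of view:
//
//	client := NewStateSyncClient(&stateSyncClientConfig{ /* enabled, dbs, chain, ... */ })
//	summary, err := client.GetOngoingSyncStateSummary() // resumable summary, if any
//
// When the engine accepts a summary (this one or a peer's), acceptSyncSummary
// persists it, launches stateSync and finishSync in a goroutine, and sends
// commonEng.StateSyncDone on [toEngine]; the VM later surfaces client.Error()
// when it transitions to bootstrapping.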