github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/gossip/c_block_callbacks.go

package gossip

import (
	"fmt"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/unicornultrafoundation/go-helios/hash"
	"github.com/unicornultrafoundation/go-helios/native/dag"
	"github.com/unicornultrafoundation/go-helios/native/idx"
	"github.com/unicornultrafoundation/go-helios/native/pos"
	utypes "github.com/unicornultrafoundation/go-helios/types"
	"github.com/unicornultrafoundation/go-helios/utils/workers"

	"github.com/unicornultrafoundation/go-u2u/common"
	"github.com/unicornultrafoundation/go-u2u/core/types"
	"github.com/unicornultrafoundation/go-u2u/evmcore"
	"github.com/unicornultrafoundation/go-u2u/evmcore/txtracer"
	"github.com/unicornultrafoundation/go-u2u/gossip/blockproc/verwatcher"
	"github.com/unicornultrafoundation/go-u2u/gossip/emitter"
	"github.com/unicornultrafoundation/go-u2u/gossip/evmstore"
	"github.com/unicornultrafoundation/go-u2u/log"
	"github.com/unicornultrafoundation/go-u2u/metrics"
	"github.com/unicornultrafoundation/go-u2u/native"
	"github.com/unicornultrafoundation/go-u2u/native/iblockproc"
	"github.com/unicornultrafoundation/go-u2u/u2u"
	"github.com/unicornultrafoundation/go-u2u/utils"
)

var (
	// Ethereum compatible metrics set (see go-ethereum/core)

	headBlockGauge     = metrics.GetOrRegisterGauge("chain/head/block", nil)
	headHeaderGauge    = metrics.GetOrRegisterGauge("chain/head/header", nil)
	headFastBlockGauge = metrics.GetOrRegisterGauge("chain/head/receipt", nil)

	accountReadTimer   = metrics.GetOrRegisterTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.GetOrRegisterTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.GetOrRegisterTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.GetOrRegisterTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.GetOrRegisterTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.GetOrRegisterTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.GetOrRegisterTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.GetOrRegisterTimer("chain/storage/commits", nil)

	snapshotAccountReadTimer = metrics.GetOrRegisterTimer("chain/snapshot/account/reads", nil)
	snapshotStorageReadTimer = metrics.GetOrRegisterTimer("chain/snapshot/storage/reads", nil)
	snapshotCommitTimer      = metrics.GetOrRegisterTimer("chain/snapshot/commits", nil)

	blockInsertTimer    = metrics.GetOrRegisterTimer("chain/inserts", nil)
	blockExecutionTimer = metrics.GetOrRegisterTimer("chain/execution", nil)
	blockWriteTimer     = metrics.GetOrRegisterTimer("chain/write", nil)
	blockAgeGauge       = metrics.GetOrRegisterGauge("chain/block/age", nil)
)

type ExtendedTxPosition struct {
	evmstore.TxPosition
	EventCreator idx.ValidatorID
}

// GetConsensusCallbacks returns single (for Service) callback instance.
func (s *Service) GetConsensusCallbacks() utypes.ConsensusCallbacks {
	return utypes.ConsensusCallbacks{
		BeginBlock: consensusCallbackBeginBlockFn(
			s.blockProcTasks,
			&s.blockProcWg,
			&s.blockBusyFlag,
			s.store,
			s.blockProcModules,
			s.config.TxIndex,
			&s.feed,
			&s.emitters,
			s.verWatcher,
			&s.bootstrapping,
		),
	}
}
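// The callbacks above are driven by the consensus engine: BeginBlock is called
// once per decided block, ApplyEvent once per confirmed event, and EndBlock
// once at the end of the block. A rough usage sketch (illustrative only; `svc`,
// `cBlock` and `confirmedEvents` are placeholders, not names from this package):
//
//	cb := svc.GetConsensusCallbacks()
//	blockCb := cb.BeginBlock(cBlock)
//	for _, e := range confirmedEvents {
//		blockCb.ApplyEvent(e)
//	}
//	newValidators := blockCb.EndBlock() // non-nil only if the epoch was sealed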
// consensusCallbackBeginBlockFn takes only the dependencies necessary for
// block processing and makes a types.BeginBlockFn.
func consensusCallbackBeginBlockFn(
	parallelTasks *workers.Workers,
	wg *sync.WaitGroup,
	blockBusyFlag *uint32,
	store *Store,
	blockProc BlockProc,
	txIndex bool,
	feed *ServiceFeed,
	emitters *[]*emitter.Emitter,
	verWatcher *verwatcher.VerWarcher,
	bootstrapping *bool,
) utypes.BeginBlockFn {
	return func(cBlock *utypes.Block) utypes.BlockCallbacks {
		if *bootstrapping {
			// ignore block processing during bootstrapping
			return utypes.BlockCallbacks{
				ApplyEvent: func(dag.Event) {},
				EndBlock: func() *pos.Validators {
					return nil
				},
			}
		}
		wg.Wait()
		start := time.Now()

		// Note: take copies to avoid race conditions with API calls
		bs := store.GetBlockState().Copy()
		es := store.GetEpochState().Copy()

		// merge cheaters to ensure that every cheater will get punished even if only a previous (not the current) Atropos observed a doublesign
		// this feature is needed because blocks may be skipped even if the cheaters list isn't empty
		// otherwise cheaters would get punished only after the first block where they were observed
		bs.EpochCheaters = mergeCheaters(bs.EpochCheaters, cBlock.Cheaters)

		// Get stateDB
		statedb, err := store.evm.StateDB(bs.FinalizedStateRoot)
		if err != nil {
			log.Crit("Failed to open StateDB", "err", err)
		}
		evmStateReader := &EvmStateReader{
			ServiceFeed: feed,
			store:       store,
		}

		eventProcessor := blockProc.EventsModule.Start(bs, es)

		atroposTime := bs.LastBlock.Time + 1
		atroposDegenerate := true
		// events with txs
		confirmedEvents := make(hash.OrderedEvents, 0, 3*es.Validators.Len())

		mpsCheatersMap := make(map[idx.ValidatorID]struct{})
		reportCheater := func(reporter, cheater idx.ValidatorID) {
			mpsCheatersMap[cheater] = struct{}{}
		}

		return utypes.BlockCallbacks{
			ApplyEvent: func(_e dag.Event) {
				e := _e.(native.EventI)
				if cBlock.Event == e.ID() {
					atroposTime = e.MedianTime()
					atroposDegenerate = false
				}
				if e.AnyTxs() {
					confirmedEvents = append(confirmedEvents, e.ID())
				}
				if e.AnyMisbehaviourProofs() {
					mps := store.GetEventPayload(e.ID()).MisbehaviourProofs()
					for _, mp := range mps {
						// self-contained parts of proofs are already checked by the checkers
						if proof := mp.BlockVoteDoublesign; proof != nil {
							reportCheater(e.Creator(), proof.Pair[0].Signed.Locator.Creator)
						}
						if proof := mp.EpochVoteDoublesign; proof != nil {
							reportCheater(e.Creator(), proof.Pair[0].Signed.Locator.Creator)
						}
						if proof := mp.EventsDoublesign; proof != nil {
							reportCheater(e.Creator(), proof.Pair[0].Locator.Creator)
						}
						if proof := mp.WrongBlockVote; proof != nil {
							// all other votes are the same, see MinAccomplicesForProof
							if proof.WrongEpoch {
								actualBlockEpoch := store.FindBlockEpoch(proof.Block)
								if actualBlockEpoch != 0 && actualBlockEpoch != proof.Pals[0].Val.Epoch {
									for _, pal := range proof.Pals {
										reportCheater(e.Creator(), pal.Signed.Locator.Creator)
									}
								}
							} else {
								actualRecord := store.GetFullBlockRecord(proof.Block)
								if actualRecord != nil && proof.GetVote(0) != actualRecord.Hash() {
									for _, pal := range proof.Pals {
										reportCheater(e.Creator(), pal.Signed.Locator.Creator)
									}
								}
							}
						}
						if proof := mp.WrongEpochVote; proof != nil {
							// all other votes are the same, see MinAccomplicesForProof
							vote := proof.Pals[0]
							actualRecord := store.GetFullEpochRecord(vote.Val.Epoch)
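							// The full epoch record may be absent locally (e.g. not decided
							// yet); in that case the vote cannot be checked against anything,
							// so this proof is skipped below.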
							if actualRecord == nil {
								continue
							}
							if vote.Val.Vote != actualRecord.Hash() {
								for _, pal := range proof.Pals {
									reportCheater(e.Creator(), pal.Signed.Locator.Creator)
								}
							}
						}
					}
				}
				eventProcessor.ProcessConfirmedEvent(e)
				for _, em := range *emitters {
					em.OnEventConfirmed(e)
				}
			},
			EndBlock: func() (newValidators *pos.Validators) {
				if atroposTime <= bs.LastBlock.Time {
					atroposTime = bs.LastBlock.Time + 1
				}
				blockCtx := iblockproc.BlockCtx{
					Idx:     bs.LastBlock.Idx + 1,
					Time:    atroposTime,
					Atropos: cBlock.Event,
				}
				// Note:
				// it's possible that a previous Atropos observes the current Atropos (1)
				// (an even stronger statement is true: the current Atropos may be equal to a previous Atropos).
				// (1) holds if and only if ApplyEvent wasn't called.
				// In other words, we should assume that every non-cheater root may be elected as an Atropos in any order,
				// even though typically every previous Atropos happened-before the current Atropos.
				// We have to skip the block in case (1) to ensure that every block ID is unique.
				// If the Atropos ID weren't used as a block ID, this wouldn't be required.
				skipBlock := atroposDegenerate
				// Check if an empty block should be pruned
				emptyBlock := confirmedEvents.Len() == 0 && cBlock.Cheaters.Len() == 0
				skipBlock = skipBlock || (emptyBlock && blockCtx.Time < bs.LastBlock.Time+es.Rules.Blocks.MaxEmptyBlockSkipPeriod)
				// Finalize the progress of eventProcessor
				bs = eventProcessor.Finalize(blockCtx, skipBlock) // TODO: refactor to not mutate the bs, it is unclear
				{ // sort and merge MPs cheaters
					mpsCheaters := make(utypes.Cheaters, 0, len(mpsCheatersMap))
					for vid := range mpsCheatersMap {
						mpsCheaters = append(mpsCheaters, vid)
					}
					sort.Slice(mpsCheaters, func(i, j int) bool {
						a, b := mpsCheaters[i], mpsCheaters[j]
						return a < b
					})
					bs.EpochCheaters = mergeCheaters(bs.EpochCheaters, mpsCheaters)
				}
				if skipBlock {
					// save the latest block state even if the block is skipped
					store.SetBlockEpochState(bs, es)
					log.Debug("Frame is skipped", "atropos", cBlock.Event.String())
					return nil
				}

				sealer := blockProc.SealerModule.Start(blockCtx, bs, es)
				sealing := sealer.EpochSealing()
				txListener := blockProc.TxListenerModule.Start(blockCtx, bs, es, statedb)
				onNewLogAll := func(l *types.Log) {
					txListener.OnNewLog(l)
					// Note: it's possible for logs to get indexed twice by BR and block processing
					if verWatcher != nil {
						verWatcher.OnNewLog(l)
					}
				}

				// skip LLR block/epoch deciding if not activated
				if !es.Rules.Upgrades.Llr {
					store.ModifyLlrState(func(llrs *LlrState) {
						if llrs.LowestBlockToDecide == blockCtx.Idx {
							llrs.LowestBlockToDecide++
						}
						if sealing && es.Epoch+1 == llrs.LowestEpochToDecide {
							llrs.LowestEpochToDecide++
						}
					})
				}

				// Providing default config
				// In case of a transaction-tracing node, this config is changed
				evmCfg := u2u.DefaultVMConfig
				if store.txtracer != nil {
					evmCfg.Debug = true
					evmCfg.Tracer = txtracer.NewTraceStructLogger(store.txtracer)
				}

				evmProcessor := blockProc.EVMModule.Start(blockCtx, statedb, evmStateReader, onNewLogAll, es.Rules, es.Rules.EvmChainConfig(store.GetUpgradeHeights()))
				executionStart := time.Now()

				// Execute pre-internal transactions
				preInternalTxs := blockProc.PreTxTransactor.PopInternalTxs(blockCtx, bs, es, sealing, statedb)
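				// Pre-internal transactions are created by the protocol itself rather than
				// taken from the txpool, so a revert indicates an internal inconsistency
				// and is surfaced via the warning below.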
				preInternalReceipts := evmProcessor.Execute(preInternalTxs)
				bs = txListener.Finalize()
				for _, r := range preInternalReceipts {
					if r.Status == 0 {
						log.Warn("Pre-internal transaction reverted", "txid", r.TxHash.String())
					}
				}

				// Seal epoch if requested
				if sealing {
					sealer.Update(bs, es)
					prevUpg := es.Rules.Upgrades
					bs, es = sealer.SealEpoch() // TODO: refactor to not mutate the bs, it is unclear
					if es.Rules.Upgrades != prevUpg {
						store.AddUpgradeHeight(u2u.UpgradeHeight{
							Upgrades: es.Rules.Upgrades,
							Height:   blockCtx.Idx + 1,
						})
					}
					store.SetBlockEpochState(bs, es)
					newValidators = es.Validators
					txListener.Update(bs, es)
				}

				// At this point, newValidators may be returned and the rest of the code may be executed in a parallel thread
				blockFn := func() {
					// Execute post-internal transactions
					internalTxs := blockProc.PostTxTransactor.PopInternalTxs(blockCtx, bs, es, sealing, statedb)
					internalReceipts := evmProcessor.Execute(internalTxs)
					for _, r := range internalReceipts {
						if r.Status == 0 {
							log.Warn("Internal transaction reverted", "txid", r.TxHash.String())
						}
					}

					// sort events by Lamport time
					sort.Sort(confirmedEvents)

					// new block
					var block = &native.Block{
						Time:    blockCtx.Time,
						Atropos: cBlock.Event,
						Events:  hash.Events(confirmedEvents),
					}
					for _, tx := range append(preInternalTxs, internalTxs...) {
						block.Txs = append(block.Txs, tx.Hash())
					}

					block, blockEvents := spillBlockEvents(store, block, es.Rules)
					txs := make(types.Transactions, 0, blockEvents.Len()*10)
					for _, e := range blockEvents {
						txs = append(txs, e.Txs()...)
					}

					_ = evmProcessor.Execute(txs)

					evmBlock, skippedTxs, allReceipts := evmProcessor.Finalize()
					block.SkippedTxs = skippedTxs
					block.Root = hash.Hash(evmBlock.Root)
					block.GasUsed = evmBlock.GasUsed

					// memorize event position of each tx
					txPositions := make(map[common.Hash]ExtendedTxPosition)
					for _, e := range blockEvents {
						for i, tx := range e.Txs() {
							// If tx was met in multiple events, then assign to first ordered event
							if _, ok := txPositions[tx.Hash()]; ok {
								continue
							}
							txPositions[tx.Hash()] = ExtendedTxPosition{
								TxPosition: evmstore.TxPosition{
									Event:       e.ID(),
									EventOffset: uint32(i),
								},
								EventCreator: e.Creator(),
							}
						}
					}
					// memorize block position of each tx
					for i, tx := range evmBlock.Transactions {
						// not skipped txs only
						position := txPositions[tx.Hash()]
						position.Block = blockCtx.Idx
						position.BlockOffset = uint32(i)
						txPositions[tx.Hash()] = position
					}

					// call OnNewReceipt
					for i, r := range allReceipts {
						creator := txPositions[r.TxHash].EventCreator
						if creator != 0 && es.Validators.Get(creator) == 0 {
							creator = 0
						}
						txListener.OnNewReceipt(evmBlock.Transactions[i], r, creator)
					}
					bs = txListener.Finalize() // TODO: refactor to not mutate the bs
					bs.FinalizedStateRoot = block.Root
					// At this point, block state is finalized
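					// The positions collected above back the tx index written below;
					// a consumer would later look a tx up roughly like this (illustrative
					// sketch, assuming evmstore's usual GetTxPosition getter):
					//
					//	position := store.evm.GetTxPosition(txHash) // -> event, block, offsets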
					// Build index for not skipped txs
					if txIndex {
						for _, tx := range evmBlock.Transactions {
							// not skipped txs only
							store.evm.SetTxPosition(tx.Hash(), txPositions[tx.Hash()].TxPosition)
						}

						// Index receipts
						// Note: it's possible for receipts to get indexed twice by BR and block processing
						if allReceipts.Len() != 0 {
							store.evm.SetReceipts(blockCtx.Idx, allReceipts)
							for _, r := range allReceipts {
								store.evm.IndexLogs(r.Logs...)
							}
						}
					}
					for _, tx := range append(preInternalTxs, internalTxs...) {
						store.evm.SetTx(tx.Hash(), tx)
					}

					bs.LastBlock = blockCtx
					bs.CheatersWritten = uint32(bs.EpochCheaters.Len())
					if sealing {
						store.SetHistoryBlockEpochState(es.Epoch, bs, es)
						store.SetEpochBlock(blockCtx.Idx+1, es.Epoch)
					}
					store.SetBlock(blockCtx.Idx, block)
					store.SetBlockIndex(block.Atropos, blockCtx.Idx)
					store.SetBlockEpochState(bs, es)
					store.EvmStore().SetCachedEvmBlock(blockCtx.Idx, evmBlock)
					updateLowestBlockToFill(blockCtx.Idx, store)
					updateLowestEpochToFill(es.Epoch, store)

					// Update the metrics touched during block processing
					accountReadTimer.Update(statedb.AccountReads)
					storageReadTimer.Update(statedb.StorageReads)
					accountUpdateTimer.Update(statedb.AccountUpdates)
					storageUpdateTimer.Update(statedb.StorageUpdates)
					snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads)
					snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads)
					accountHashTimer.Update(statedb.AccountHashes)
					storageHashTimer.Update(statedb.StorageHashes)
					triehash := statedb.AccountHashes + statedb.StorageHashes
					trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
					trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
					blockExecutionTimer.Update(time.Since(executionStart) - trieproc - triehash)

					// Update the metrics touched by new block
					headBlockGauge.Update(int64(blockCtx.Idx))
					headHeaderGauge.Update(int64(blockCtx.Idx))
					headFastBlockGauge.Update(int64(blockCtx.Idx))

					// Notify about new block
					if feed != nil {
						feed.newBlock.Send(evmcore.ChainHeadNotify{Block: evmBlock})
						var logs []*types.Log
						for _, r := range allReceipts {
							for _, l := range r.Logs {
								logs = append(logs, l)
							}
						}
						feed.newLogs.Send(logs)
					}

					commitStart := time.Now()
					store.commitEVM(false)

					// Update the metrics touched during block commit
					accountCommitTimer.Update(statedb.AccountCommits)
					storageCommitTimer.Update(statedb.StorageCommits)
					snapshotCommitTimer.Update(statedb.SnapshotCommits)
					blockWriteTimer.Update(time.Since(commitStart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
					blockInsertTimer.UpdateSince(start)

					now := time.Now()
					blockAge := now.Sub(block.Time.Time())
					log.Info("New block", "index", blockCtx.Idx, "id", block.Atropos, "gas_used",
						evmBlock.GasUsed, "txs", fmt.Sprintf("%d/%d", len(evmBlock.Transactions), len(block.SkippedTxs)),
						"age", utils.PrettyDuration(blockAge), "t", utils.PrettyDuration(now.Sub(start)))
					blockAgeGauge.Update(int64(blockAge.Nanoseconds()))
				}
				if confirmedEvents.Len() != 0 {
					atomic.StoreUint32(blockBusyFlag, 1)
					wg.Add(1)
					err := parallelTasks.Enqueue(func() {
						defer atomic.StoreUint32(blockBusyFlag, 0)
						defer wg.Done()
						blockFn()
					})
					if err != nil {
						panic(err)
					}
				} else {
					blockFn()
				}

				return newValidators
			},
		}
	}
}
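// ReexecuteBlocks re-executes blocks (from, to] on top of the state root of
// block `from`, restoring EVM state that is missing from the store; it is used
// by RecoverEVM below after an abrupt shutdown. Illustrative call (the index
// names are placeholders):
//
//	s.ReexecuteBlocks(lastBlockWithState, latestBlock)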
func (s *Service) ReexecuteBlocks(from, to idx.Block) {
	blockProc := s.blockProcModules
	upgradeHeights := s.store.GetUpgradeHeights()
	evmStateReader := s.GetEvmStateReader()
	prev := s.store.GetBlock(from)
	for b := from + 1; b <= to; b++ {
		block := s.store.GetBlock(b)
		blockCtx := iblockproc.BlockCtx{
			Idx:     b,
			Time:    block.Time,
			Atropos: block.Atropos,
		}
		statedb, err := s.store.evm.StateDB(prev.Root)
		if err != nil {
			log.Crit("Failed to re-execute blocks", "err", err)
		}
		es := s.store.GetHistoryEpochState(s.store.FindBlockEpoch(b))
		// Providing default config
		// In case of a transaction-tracing node, this config is changed
		evmCfg := u2u.DefaultVMConfig
		if s.store.txtracer != nil {
			evmCfg.Debug = true
			evmCfg.Tracer = txtracer.NewTraceStructLogger(s.store.txtracer)
		}
		evmProcessor := blockProc.EVMModule.Start(blockCtx, statedb, evmStateReader, func(t *types.Log) {}, es.Rules, es.Rules.EvmChainConfig(upgradeHeights))
		txs := s.store.GetBlockTxs(b, block)
		evmProcessor.Execute(txs)
		evmProcessor.Finalize()
		_ = s.store.evm.Commit(b, block.Root, false)
		s.store.evm.Cap()
		s.mayCommit(false)
		prev = block
	}
}

// RecoverEVM re-executes recent blocks if the EVM state of the latest block is
// missing, e.g. after an abrupt stop.
func (s *Service) RecoverEVM() {
	start := s.store.GetLatestBlockIndex()
	for b := start; b >= 1 && b > start-20000; b-- {
		block := s.store.GetBlock(b)
		if block == nil {
			break
		}
		if s.store.evm.HasStateDB(block.Root) {
			if b != start {
				s.Log.Warn("Reexecuting blocks after abrupt stopping", "from", b, "to", start)
				s.ReexecuteBlocks(b, start)
			}
			break
		}
	}
}

// spillBlockEvents excludes the first (earliest) events so that the block's
// total gas power used doesn't exceed MaxBlockGas
func spillBlockEvents(store *Store, block *native.Block, network u2u.Rules) (*native.Block, native.EventPayloads) {
	fullEvents := make(native.EventPayloads, len(block.Events))
	if len(block.Events) == 0 {
		return block, fullEvents
	}
	gasPowerUsedSum := uint64(0)
	// iterate in reverse order
	for i := len(block.Events) - 1; ; i-- {
		id := block.Events[i]
		e := store.GetEventPayload(id)
		if e == nil {
			log.Crit("Block event not found", "event", id.String())
		}
		fullEvents[i] = e
		gasPowerUsedSum += e.GasPowerUsed()
		// stop if limit is exceeded, erase [:i] events
		if gasPowerUsedSum > network.Blocks.MaxBlockGas {
			// spill
			block.Events = block.Events[i+1:]
			fullEvents = fullEvents[i+1:]
			break
		}
		if i == 0 {
			break
		}
	}
	return block, fullEvents
}

func mergeCheaters(a, b utypes.Cheaters) utypes.Cheaters {
	if len(b) == 0 {
		return a
	}
	if len(a) == 0 {
		return b
	}
	aSet := a.Set()
	merged := make(utypes.Cheaters, 0, len(b)+len(a))
	for _, v := range a {
		merged = append(merged, v)
	}
	for _, v := range b {
		if _, ok := aSet[v]; !ok {
			merged = append(merged, v)
		}
	}
	return merged
}
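// mergeCheaters preserves the order of a and appends only those members of b
// that are not already present in a. A worked example (arbitrary validator IDs):
//
//	mergeCheaters([3, 1], [1, 2]) // -> [3, 1, 2]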