package powchain

import (
	"context"
	"encoding/binary"
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	gethTypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	contracts "github.com/prysmaticlabs/prysm/contracts/deposit-contract"
	protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
)

var (
	// depositEventSignature is the keccak256 hash of the deposit contract's
	// DepositEvent topic; logs are matched against it in ProcessLog.
	depositEventSignature = hashutil.HashKeccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
)

// eth1DataSavingInterval is how often, measured in received deposit logs
// (merkle indices), powchain metadata is persisted via savePowchainData.
const eth1DataSavingInterval = 100

// maxTolerableDifference is the maximum number of blocks the service may lag
// behind the follow height before requestBatchedHeadersAndLogs falls back to
// the historical sync path (processPastLogs).
const maxTolerableDifference = 50

// defaultEth1HeaderReqLimit bounds header batch requests.
// NOTE(review): not referenced in this file — presumably consumed by the
// service configuration elsewhere; confirm against the Service setup code.
const defaultEth1HeaderReqLimit = uint64(1000)

// depositlogRequestLimit mirrors Infura's cap of 10000 results per
// FilterLogs query (see tooMuchDataRequestedError below).
const depositlogRequestLimit = 10000

// additiveFactorMultiplier and multiplicativeDecreaseDivisor implement
// AIMD (additive-increase/multiplicative-decrease) sizing of the header/log
// batch requests in processPastLogs.
const additiveFactorMultiplier = 0.10
const multiplicativeDecreaseDivisor = 2

// tooMuchDataRequestedError reports whether err is Infura's "too many
// results" response to an overly wide FilterLogs query.
func tooMuchDataRequestedError(err error) bool {
	// this error is only infura specific (other providers might have different error messages)
	return err.Error() == "query returned more than 10000 results"
}

// Eth2GenesisPowchainInfo retrieves the genesis time and eth1 block number of the beacon chain
// from the deposit contract.
47 func (s *Service) Eth2GenesisPowchainInfo() (uint64, *big.Int) { 48 return s.chainStartData.GenesisTime, big.NewInt(int64(s.chainStartData.GenesisBlock)) 49 } 50 51 // ProcessETH1Block processes the logs from the provided eth1Block. 52 func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error { 53 query := ethereum.FilterQuery{ 54 Addresses: []common.Address{ 55 s.cfg.DepositContract, 56 }, 57 FromBlock: blkNum, 58 ToBlock: blkNum, 59 } 60 logs, err := s.httpLogger.FilterLogs(ctx, query) 61 if err != nil { 62 return err 63 } 64 for _, filterLog := range logs { 65 // ignore logs that are not of the required block number 66 if filterLog.BlockNumber != blkNum.Uint64() { 67 continue 68 } 69 if err := s.ProcessLog(ctx, filterLog); err != nil { 70 return errors.Wrap(err, "could not process log") 71 } 72 } 73 if !s.chainStartData.Chainstarted { 74 if err := s.checkBlockNumberForChainStart(ctx, blkNum); err != nil { 75 return err 76 } 77 } 78 return nil 79 } 80 81 // ProcessLog is the main method which handles the processing of all 82 // logs from the deposit contract on the ETH1.0 chain. 83 func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) error { 84 s.processingLock.RLock() 85 defer s.processingLock.RUnlock() 86 // Process logs according to their event signature. 87 if depositLog.Topics[0] == depositEventSignature { 88 if err := s.ProcessDepositLog(ctx, depositLog); err != nil { 89 return errors.Wrap(err, "Could not process deposit log") 90 } 91 if s.lastReceivedMerkleIndex%eth1DataSavingInterval == 0 { 92 return s.savePowchainData(ctx) 93 } 94 return nil 95 } 96 log.WithField("signature", fmt.Sprintf("%#x", depositLog.Topics[0])).Debug("Not a valid event signature") 97 return nil 98 } 99 100 // ProcessDepositLog processes the log which had been received from 101 // the ETH1.0 chain by trying to ascertain which participant deposited 102 // in the contract. 
103 func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Log) error { 104 pubkey, withdrawalCredentials, amount, signature, merkleTreeIndex, err := contracts.UnpackDepositLogData(depositLog.Data) 105 if err != nil { 106 return errors.Wrap(err, "Could not unpack log") 107 } 108 // If we have already seen this Merkle index, skip processing the log. 109 // This can happen sometimes when we receive the same log twice from the 110 // ETH1.0 network, and prevents us from updating our trie 111 // with the same log twice, causing an inconsistent state root. 112 index := int64(binary.LittleEndian.Uint64(merkleTreeIndex)) 113 if index <= s.lastReceivedMerkleIndex { 114 return nil 115 } 116 117 if index != s.lastReceivedMerkleIndex+1 { 118 missedDepositLogsCount.Inc() 119 return errors.Errorf("received incorrect merkle index: wanted %d but got %d", s.lastReceivedMerkleIndex+1, index) 120 } 121 s.lastReceivedMerkleIndex = index 122 123 // We then decode the deposit input in order to create a deposit object 124 // we can store in our persistent DB. 125 depositData := ðpb.Deposit_Data{ 126 Amount: bytesutil.FromBytes8(amount), 127 PublicKey: pubkey, 128 Signature: signature, 129 WithdrawalCredentials: withdrawalCredentials, 130 } 131 132 depositHash, err := depositData.HashTreeRoot() 133 if err != nil { 134 return errors.Wrap(err, "Unable to determine hashed value of deposit") 135 } 136 137 // Defensive check to validate incoming index. 138 if s.depositTrie.NumOfItems() != int(index) { 139 return errors.Errorf("invalid deposit index received: wanted %d but got %d", s.depositTrie.NumOfItems(), index) 140 } 141 s.depositTrie.Insert(depositHash[:], int(index)) 142 143 deposit := ðpb.Deposit{ 144 Data: depositData, 145 } 146 // Only generate the proofs during pre-genesis. 
147 if !s.chainStartData.Chainstarted { 148 proof, err := s.depositTrie.MerkleProof(int(index)) 149 if err != nil { 150 return errors.Wrap(err, "Unable to generate merkle proof for deposit") 151 } 152 deposit.Proof = proof 153 } 154 155 // We always store all historical deposits in the DB. 156 err = s.cfg.DepositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.Root()) 157 if err != nil { 158 return errors.Wrap(err, "unable to insert deposit into cache") 159 } 160 validData := true 161 if !s.chainStartData.Chainstarted { 162 s.chainStartData.ChainstartDeposits = append(s.chainStartData.ChainstartDeposits, deposit) 163 root := s.depositTrie.Root() 164 eth1Data := ðpb.Eth1Data{ 165 DepositRoot: root[:], 166 DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)), 167 } 168 if err := s.processDeposit(ctx, eth1Data, deposit); err != nil { 169 log.Errorf("Invalid deposit processed: %v", err) 170 validData = false 171 } 172 } else { 173 s.cfg.DepositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.Root()) 174 } 175 if validData { 176 log.WithFields(logrus.Fields{ 177 "eth1Block": depositLog.BlockNumber, 178 "publicKey": fmt.Sprintf("%#x", depositData.PublicKey), 179 "merkleTreeIndex": index, 180 }).Debug("Deposit registered from deposit contract") 181 validDepositsCount.Inc() 182 // Notify users what is going on, from time to time. 
183 if !s.chainStartData.Chainstarted { 184 deposits := len(s.chainStartData.ChainstartDeposits) 185 if deposits%512 == 0 { 186 valCount, err := helpers.ActiveValidatorCount(s.preGenesisState, 0) 187 if err != nil { 188 log.WithError(err).Error("Could not determine active validator count from pre genesis state") 189 } 190 log.WithFields(logrus.Fields{ 191 "deposits": deposits, 192 "genesisValidators": valCount, 193 }).Info("Processing deposits from Ethereum 1 chain") 194 } 195 } 196 } else { 197 log.WithFields(logrus.Fields{ 198 "eth1Block": depositLog.BlockHash.Hex(), 199 "eth1Tx": depositLog.TxHash.Hex(), 200 "merkleTreeIndex": index, 201 }).Info("Invalid deposit registered in deposit contract") 202 } 203 return nil 204 } 205 206 // ProcessChainStart processes the log which had been received from 207 // the ETH1.0 chain by trying to determine when to start the beacon chain. 208 func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte, blockNumber *big.Int) { 209 s.chainStartData.Chainstarted = true 210 s.chainStartData.GenesisBlock = blockNumber.Uint64() 211 212 chainStartTime := time.Unix(int64(genesisTime), 0) 213 214 for i := range s.chainStartData.ChainstartDeposits { 215 proof, err := s.depositTrie.MerkleProof(i) 216 if err != nil { 217 log.Errorf("Unable to generate deposit proof %v", err) 218 } 219 s.chainStartData.ChainstartDeposits[i].Proof = proof 220 } 221 222 root := s.depositTrie.Root() 223 s.chainStartData.Eth1Data = ðpb.Eth1Data{ 224 DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)), 225 DepositRoot: root[:], 226 BlockHash: eth1BlockHash[:], 227 } 228 229 log.WithFields(logrus.Fields{ 230 "ChainStartTime": chainStartTime, 231 }).Info("Minimum number of validators reached for beacon-chain to start") 232 s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ 233 Type: statefeed.ChainStarted, 234 Data: &statefeed.ChainStartedData{ 235 StartTime: chainStartTime, 236 }, 237 }) 238 if err := s.savePowchainData(s.ctx); err != 
nil { 239 // continue on, if the save fails as this will get re-saved 240 // in the next interval. 241 log.Error(err) 242 } 243 } 244 245 func (s *Service) createGenesisTime(timeStamp uint64) uint64 { 246 // adds in the genesis delay to the eth1 block time 247 // on which it was triggered. 248 return timeStamp + params.BeaconConfig().GenesisDelay 249 } 250 251 // processPastLogs processes all the past logs from the deposit contract and 252 // updates the deposit trie with the data from each individual log. 253 func (s *Service) processPastLogs(ctx context.Context) error { 254 currentBlockNum := s.latestEth1Data.LastRequestedBlock 255 deploymentBlock := params.BeaconNetworkConfig().ContractDeploymentBlock 256 // Start from the deployment block if our last requested block 257 // is behind it. This is as the deposit logs can only start from the 258 // block of the deployment of the deposit contract. 259 if deploymentBlock > currentBlockNum { 260 currentBlockNum = deploymentBlock 261 } 262 // To store all blocks. 263 headersMap := make(map[uint64]*gethTypes.Header) 264 rawLogCount, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{}) 265 if err != nil { 266 return err 267 } 268 logCount := binary.LittleEndian.Uint64(rawLogCount) 269 270 // Batch request the desired headers and store them in a 271 // map for quick access. 
272 requestHeaders := func(startBlk uint64, endBlk uint64) error { 273 headers, err := s.batchRequestHeaders(startBlk, endBlk) 274 if err != nil { 275 return err 276 } 277 for _, h := range headers { 278 if h != nil && h.Number != nil { 279 headersMap[h.Number.Uint64()] = h 280 } 281 } 282 return nil 283 } 284 latestFollowHeight, err := s.followBlockHeight(ctx) 285 if err != nil { 286 return err 287 } 288 289 batchSize := s.cfg.Eth1HeaderReqLimit 290 additiveFactor := uint64(float64(batchSize) * additiveFactorMultiplier) 291 292 for currentBlockNum < latestFollowHeight { 293 start := currentBlockNum 294 end := currentBlockNum + batchSize 295 // Appropriately bound the request, as we do not 296 // want request blocks beyond the current follow distance. 297 if end > latestFollowHeight { 298 end = latestFollowHeight 299 } 300 query := ethereum.FilterQuery{ 301 Addresses: []common.Address{ 302 s.cfg.DepositContract, 303 }, 304 FromBlock: big.NewInt(int64(start)), 305 ToBlock: big.NewInt(int64(end)), 306 } 307 remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1) 308 // only change the end block if the remaining logs are below the required log limit. 309 // reset our query and end block in this case. 310 withinLimit := remainingLogs < depositlogRequestLimit 311 aboveFollowHeight := end >= latestFollowHeight 312 if withinLimit && aboveFollowHeight { 313 query.ToBlock = big.NewInt(int64(latestFollowHeight)) 314 end = latestFollowHeight 315 } 316 logs, err := s.httpLogger.FilterLogs(ctx, query) 317 if err != nil { 318 if tooMuchDataRequestedError(err) { 319 if batchSize == 0 { 320 return errors.New("batch size is zero") 321 } 322 323 // multiplicative decrease 324 batchSize /= multiplicativeDecreaseDivisor 325 continue 326 } 327 return err 328 } 329 // Only request headers before chainstart to correctly determine 330 // genesis. 
331 if !s.chainStartData.Chainstarted { 332 if err := requestHeaders(start, end); err != nil { 333 return err 334 } 335 } 336 337 for _, filterLog := range logs { 338 if filterLog.BlockNumber > currentBlockNum { 339 if err := s.checkHeaderRange(currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil { 340 return err 341 } 342 // set new block number after checking for chainstart for previous block. 343 s.latestEth1Data.LastRequestedBlock = currentBlockNum 344 currentBlockNum = filterLog.BlockNumber 345 } 346 if err := s.ProcessLog(ctx, filterLog); err != nil { 347 return err 348 } 349 } 350 if err := s.checkHeaderRange(currentBlockNum, end, headersMap, requestHeaders); err != nil { 351 return err 352 } 353 currentBlockNum = end 354 355 if batchSize < s.cfg.Eth1HeaderReqLimit { 356 // update the batchSize with additive increase 357 batchSize += additiveFactor 358 if batchSize > s.cfg.Eth1HeaderReqLimit { 359 batchSize = s.cfg.Eth1HeaderReqLimit 360 } 361 } 362 } 363 364 s.latestEth1Data.LastRequestedBlock = currentBlockNum 365 366 c, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx) 367 if err != nil { 368 return err 369 } 370 fRoot := bytesutil.ToBytes32(c.Root) 371 // Return if no checkpoint exists yet. 372 if fRoot == params.BeaconConfig().ZeroHash { 373 return nil 374 } 375 fState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot) 376 if err != nil { 377 return err 378 } 379 if fState != nil && !fState.IsNil() && fState.Eth1DepositIndex() > 0 { 380 s.cfg.DepositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex())) 381 } 382 return nil 383 } 384 385 // requestBatchedHeadersAndLogs requests and processes all the headers and 386 // logs from the period last polled to now. 387 func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error { 388 // We request for the nth block behind the current head, in order to have 389 // stabilized logs when we retrieve it from the 1.0 chain. 
390 391 requestedBlock, err := s.followBlockHeight(ctx) 392 if err != nil { 393 return err 394 } 395 if requestedBlock > s.latestEth1Data.LastRequestedBlock && 396 requestedBlock-s.latestEth1Data.LastRequestedBlock > maxTolerableDifference { 397 log.Infof("Falling back to historical headers and logs sync. Current difference is %d", requestedBlock-s.latestEth1Data.LastRequestedBlock) 398 return s.processPastLogs(ctx) 399 } 400 for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ { 401 // Cache eth1 block header here. 402 _, err := s.BlockHashByHeight(ctx, big.NewInt(int64(i))) 403 if err != nil { 404 return err 405 } 406 err = s.ProcessETH1Block(ctx, big.NewInt(int64(i))) 407 if err != nil { 408 return err 409 } 410 s.latestEth1Data.LastRequestedBlock = i 411 } 412 413 return nil 414 } 415 416 func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int) ([32]byte, uint64, error) { 417 hash, err := s.BlockHashByHeight(ctx, blkNum) 418 if err != nil { 419 return [32]byte{}, 0, errors.Wrap(err, "could not get eth1 block hash") 420 } 421 if hash == [32]byte{} { 422 return [32]byte{}, 0, errors.Wrap(err, "got empty block hash") 423 } 424 timeStamp, err := s.BlockTimeByHeight(ctx, blkNum) 425 if err != nil { 426 return [32]byte{}, 0, errors.Wrap(err, "could not get block timestamp") 427 } 428 return hash, timeStamp, nil 429 } 430 431 // checkBlockNumberForChainStart checks the given block number for if chainstart has occurred. 
432 func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error { 433 hash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum) 434 if err != nil { 435 return err 436 } 437 s.checkForChainstart(hash, blkNum, timeStamp) 438 return nil 439 } 440 441 func (s *Service) checkHeaderForChainstart(header *gethTypes.Header) { 442 s.checkForChainstart(header.Hash(), header.Number, header.Time) 443 } 444 445 func (s *Service) checkHeaderRange(start, end uint64, headersMap map[uint64]*gethTypes.Header, 446 requestHeaders func(uint64, uint64) error) error { 447 for i := start; i <= end; i++ { 448 if !s.chainStartData.Chainstarted { 449 h, ok := headersMap[i] 450 if !ok { 451 if err := requestHeaders(i, end); err != nil { 452 return err 453 } 454 // Retry this block. 455 i-- 456 continue 457 } 458 s.checkHeaderForChainstart(h) 459 } 460 } 461 return nil 462 } 463 464 // retrieves the current active validator count and genesis time from 465 // the provided block time. 466 func (s *Service) currentCountAndTime(blockTime uint64) (uint64, uint64) { 467 if s.preGenesisState.NumValidators() == 0 { 468 return 0, 0 469 } 470 valCount, err := helpers.ActiveValidatorCount(s.preGenesisState, 0) 471 if err != nil { 472 log.WithError(err).Error("Could not determine active validator count from pre genesis state") 473 return 0, 0 474 } 475 return valCount, s.createGenesisTime(blockTime) 476 } 477 478 func (s *Service) checkForChainstart(blockHash [32]byte, blockNumber *big.Int, blockTime uint64) { 479 valCount, genesisTime := s.currentCountAndTime(blockTime) 480 if valCount == 0 { 481 return 482 } 483 triggered := coreState.IsValidGenesisState(valCount, genesisTime) 484 if triggered { 485 s.chainStartData.GenesisTime = genesisTime 486 s.ProcessChainStart(s.chainStartData.GenesisTime, blockHash, blockNumber) 487 } 488 } 489 490 // save all powchain related metadata to disk. 
491 func (s *Service) savePowchainData(ctx context.Context) error { 492 pbState, err := v1.ProtobufBeaconState(s.preGenesisState.InnerStateUnsafe()) 493 if err != nil { 494 return err 495 } 496 eth1Data := &protodb.ETH1ChainData{ 497 CurrentEth1Data: s.latestEth1Data, 498 ChainstartData: s.chainStartData, 499 BeaconState: pbState, // I promise not to mutate it! 500 Trie: s.depositTrie.ToProto(), 501 DepositContainers: s.cfg.DepositCache.AllDepositContainers(ctx), 502 } 503 return s.cfg.BeaconDB.SavePowchainData(ctx, eth1Data) 504 }