github.com/Blockdaemon/celo-blockchain@v0.0.0-20200129231733-e667f6b08419/consensus/istanbul/backend/engine.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package backend

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/istanbul"
	istanbulCore "github.com/ethereum/go-ethereum/consensus/istanbul/core"
	"github.com/ethereum/go-ethereum/consensus/istanbul/validator"
	gpm "github.com/ethereum/go-ethereum/contract_comm/gasprice_minimum"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	blscrypto "github.com/ethereum/go-ethereum/crypto/bls"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
	lru "github.com/hashicorp/golang-lru"
	"golang.org/x/crypto/sha3"
)

const (
	inmemorySnapshots             = 128 // Number of recent vote snapshots to keep in memory
	inmemoryPeers                 = 40
	inmemoryMessages              = 1024
	mobileAllowedClockSkew uint64 = 5
)

var (
	// errInvalidProposal is returned when a proposal is malformed.
	errInvalidProposal = errors.New("invalid proposal")
	// errInvalidSignature is returned when the given signature is not signed by the
	// given address.
	errInvalidSignature = errors.New("invalid signature")
	// errInsufficientSeals is returned when there are not enough signatures to
	// pass the quorum check.
	errInsufficientSeals = errors.New("not enough seals to reach quorum")
	// errUnknownBlock is returned when the list of validators or the header is requested for a block
	// that is not part of the local blockchain.
	errUnknownBlock = errors.New("unknown block")
	// errUnauthorized is returned if a header is signed by a non-authorized entity.
	errUnauthorized = errors.New("not an elected validator")
	// errInvalidDifficulty is returned if the difficulty of a block is not 1.
	errInvalidDifficulty = errors.New("invalid difficulty")
	// errInvalidExtraDataFormat is returned when the extra data format is incorrect.
	errInvalidExtraDataFormat = errors.New("invalid extra data format")
	// errInvalidMixDigest is returned if a block's mix digest is not the Istanbul digest.
	errInvalidMixDigest = errors.New("invalid Istanbul mix digest")
	// errInvalidNonce is returned if a block's nonce is invalid.
	errInvalidNonce = errors.New("invalid nonce")
	// errInvalidCoinbase is returned if a block's coinbase is invalid.
	errInvalidCoinbase = errors.New("invalid coinbase")
	// errInvalidUncleHash is returned if a block contains a non-empty uncle list.
	errInvalidUncleHash = errors.New("non empty uncle hash")
	// errInvalidTimestamp is returned if the timestamp of a block is lower than the previous block's timestamp + the minimum block period.
	errInvalidTimestamp = errors.New("invalid timestamp")
	// errInvalidVotingChain is returned if an authorization list is attempted to
	// be modified via out-of-range or non-contiguous headers.
	errInvalidVotingChain = errors.New("invalid voting chain")
	// errInvalidAggregatedSeal is returned if the aggregated seal is invalid.
	errInvalidAggregatedSeal = errors.New("invalid aggregated seal")
	// errEmptyAggregatedSeal is returned if the aggregated seal is missing.
	errEmptyAggregatedSeal = errors.New("empty aggregated seal")
	// errMismatchTxhashes is returned if the TxHash in the header does not match the block's transactions.
	errMismatchTxhashes = errors.New("mismatch transactions hashes")
	// errInvalidValidatorSetDiff is returned if the header contains an invalid validator set diff.
	errInvalidValidatorSetDiff = errors.New("invalid validator set diff")
	// errUnauthorizedAnnounceMessage is returned when the received announce message is from
	// an unregistered validator.
	errUnauthorizedAnnounceMessage = errors.New("unauthorized announce message")
	// errUnauthorizedValEnodesShareMessage is returned when the received valEnodesShare message is from
	// an unauthorized sender.
	errUnauthorizedValEnodesShareMessage = errors.New("unauthorized valenodesshare message")
)

var (
	defaultDifficulty = big.NewInt(1)
	nilUncleHash      = types.CalcUncleHash(nil) // Always Keccak256(RLP([])) as uncles are meaningless outside of PoW.
	emptyNonce        = types.BlockNonce{}
	now               = time.Now

	inmemoryAddresses  = 20 // Number of recent addresses from ecrecover
	recentAddresses, _ = lru.NewARC(inmemoryAddresses)
)

// Author retrieves the Ethereum address of the account that minted the given
// block, which may be different from the header's coinbase if a consensus
// engine is based on signatures.
func (sb *Backend) Author(header *types.Header) (common.Address, error) {
	return ecrecover(header)
}

// VerifyHeader checks whether a header conforms to the consensus rules of a
// given engine. The seal is verified regardless of the given "seal" argument.
func (sb *Backend) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
	return sb.verifyHeader(chain, header, nil)
}
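
// Header verification is layered: verifyHeader checks the standalone fields,
// verifyCascadingFields checks the fields that depend on the parent header,
// verifySigner checks that the proposer belongs to the parent block's validator set,
// and verifyAggregatedSeals checks the aggregated BLS commit seals (and, when a full
// header chain is available, the parent block's aggregated seal as well).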

// verifyHeader checks whether a header conforms to the consensus rules. The
// caller may optionally pass in a batch of parents (ascending order) to avoid
// looking those up from the database. This is useful for concurrently verifying
// a batch of new headers.
func (sb *Backend) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
	if header.Number == nil {
		return errUnknownBlock
	}

	// If the full chain isn't available (as on mobile devices), don't reject future blocks.
	// This is due to potential clock skew.
	var allowedFutureBlockTime = big.NewInt(now().Unix())
	if !chain.Config().FullHeaderChainAvailable {
		allowedFutureBlockTime = new(big.Int).Add(allowedFutureBlockTime, new(big.Int).SetUint64(mobileAllowedClockSkew))
	}

	// Don't waste time checking blocks from the future
	if header.Time.Cmp(allowedFutureBlockTime) > 0 {
		return consensus.ErrFutureBlock
	}

	// Ensure that the extra data format is satisfied
	if _, err := types.ExtractIstanbulExtra(header); err != nil {
		return errInvalidExtraDataFormat
	}

	// Ensure that the nonce is empty (Istanbul was originally using it for a candidate validator vote)
	if header.Nonce != (emptyNonce) {
		return errInvalidNonce
	}

	// Ensure that the mix digest is the Istanbul digest, as we don't have fork protection currently
	if header.MixDigest != types.IstanbulDigest {
		return errInvalidMixDigest
	}
	// Ensure that the block doesn't contain any uncles, which are meaningless in Istanbul
	if header.UncleHash != nilUncleHash {
		return errInvalidUncleHash
	}
	// Ensure that the block's difficulty is meaningful (may not be correct at this point)
	if header.Difficulty == nil || header.Difficulty.Cmp(defaultDifficulty) != 0 {
		return errInvalidDifficulty
	}

	return sb.verifyCascadingFields(chain, header, parents)
}

// verifyCascadingFields verifies all the header fields that are not standalone,
// but rather depend on a batch of previous headers. The caller may optionally pass
// in a batch of parents (ascending order) to avoid looking those up from the
// database. This is useful for concurrently verifying a batch of new headers.
func (sb *Backend) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
	// The genesis block is the always valid dead-end
	number := header.Number.Uint64()
	if number == 0 {
		return nil
	}
	// Ensure that the block's timestamp isn't too close to its parent's
	var parent *types.Header
	if len(parents) > 0 {
		parent = parents[len(parents)-1]
	} else {
		parent = chain.GetHeader(header.ParentHash, number-1)
	}
	if chain.Config().FullHeaderChainAvailable {
		if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash {
			return consensus.ErrUnknownAncestor
		}
		if parent.Time.Uint64()+sb.config.BlockPeriod > header.Time.Uint64() {
			return errInvalidTimestamp
		}
		// Verify validators in extraData. Validators in snapshot and extraData should be the same.
		if err := sb.verifySigner(chain, header, parents); err != nil {
			return err
		}
	}

	return sb.verifyAggregatedSeals(chain, header, parents)
}

// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications (the order is that of
// the input slice).
func (sb *Backend) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
	abort := make(chan struct{})
	results := make(chan error, len(headers))
	go func() {
		for i, header := range headers {
			err := sb.verifyHeader(chain, header, headers[:i])

			select {
			case <-abort:
				return
			case results <- err:
			}
		}
	}()
	return abort, results
}

// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of a given engine.
func (sb *Backend) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
	if len(block.Uncles()) > 0 {
		return errInvalidUncleHash
	}
	return nil
}

// verifySigner checks whether the signer is in the parent's validator set
func (sb *Backend) verifySigner(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
	// Verifying the genesis block is not supported
	number := header.Number.Uint64()
	if number == 0 {
		return errUnknownBlock
	}

	// Retrieve the snapshot needed to verify this header and cache it
	snap, err := sb.snapshot(chain, number-1, header.ParentHash, parents)
	if err != nil {
		return err
	}

	// resolve the authorization key and check against signers
	signer, err := ecrecover(header)
	if err != nil {
		return err
	}

	// The signer should be in the validator set of the previous block's extraData.
	if _, v := snap.ValSet.GetByAddress(signer); v == nil {
		return errUnauthorized
	}
	return nil
}

// verifyAggregatedSeals checks whether the aggregated seal and the parent seal in the header
// are signed by the block's validators and the parent block's validators respectively
func (sb *Backend) verifyAggregatedSeals(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
	number := header.Number.Uint64()
	// We don't need to verify committed seals in the genesis block
	if number == 0 {
		return nil
	}

	extra, err := types.ExtractIstanbulExtra(header)
	if err != nil {
		return err
	}

	// The length of the committed seals should be larger than 0
	if len(extra.AggregatedSeal.Signature) == 0 {
		return errEmptyAggregatedSeal
	}

	// Check the signatures on the current header
	snap, err := sb.snapshot(chain, number-1, header.ParentHash, parents)
	if err != nil {
		return err
	}
	validators := snap.ValSet.Copy()
	err = sb.verifyAggregatedSeal(header.Hash(), validators, extra.AggregatedSeal)
	if err != nil {
		return err
	}

	// The genesis block is skipped since it has no parents.
	// The first block is also skipped, since its parent
	// is the genesis block which contains no parent signatures.
	// The parent commit messages are only used for the uptime calculation,
	// so ultralight clients don't need to verify them.
	if number > 1 && chain.Config().FullHeaderChainAvailable {
		sb.logger.Trace("verifyAggregatedSeals: verifying parent seals for block", "num", number)
		var parentValidators istanbul.ValidatorSet
		// The first block in an epoch will have a different validator set than the block
		// before it. If the current block is the first block in an epoch, we need to fetch the previous
		// validator set to validate the parent signatures.
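		// Illustrative example (assuming Epoch = 100): for block 101, the first block of an
		// epoch, the parent is block 100, the last block of the previous epoch, which was
		// signed by the previous epoch's validator set. snapshot(number-2) = snapshot(99)
		// yields that set, whereas snapshot(number-1) = snapshot(100) would already include
		// the validator set diff written into block 100's header.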
		if number%sb.config.Epoch == 1 {
			snap, err := sb.snapshot(chain, number-2, common.Hash{}, nil)
			if err != nil {
				return err
			}
			parentValidators = snap.ValSet.Copy()
		} else {
			parentValidators = validators.Copy()
		}

		// Check the signatures made by the validator set corresponding to the
		// parent block's hash. We use header.ParentHash to handle both
		// ultralight and non-ultralight cases.
		// parent.Hash() would correspond to the previous epoch
		// block in ultralight, while the extra.ParentCommit is made on the block which was
		// immediately before the current block.
		return sb.verifyAggregatedSeal(header.ParentHash, parentValidators, extra.ParentAggregatedSeal)
	}

	return nil
}

func (sb *Backend) verifyAggregatedSeal(headerHash common.Hash, validators istanbul.ValidatorSet, aggregatedSeal types.IstanbulAggregatedSeal) error {
	logger := sb.logger.New("func", "Backend.verifyAggregatedSeal()")
	if len(aggregatedSeal.Signature) != types.IstanbulExtraBlsSignature {
		return errInvalidAggregatedSeal
	}

	proposalSeal := istanbulCore.PrepareCommittedSeal(headerHash, aggregatedSeal.Round)
	// Find which public keys signed from the provided validator set
	publicKeys := []blscrypto.SerializedPublicKey{}
	for i := 0; i < validators.Size(); i++ {
		if aggregatedSeal.Bitmap.Bit(i) == 1 {
			pubKey := validators.GetByIndex(uint64(i)).BLSPublicKey()
			publicKeys = append(publicKeys, pubKey)
		}
	}
	// A valid aggregated seal must carry at least the minimum quorum size of seals
	if len(publicKeys) < validators.MinQuorumSize() {
		logger.Error("Aggregated seal does not aggregate enough seals", "numSeals", len(publicKeys), "minimum quorum size", validators.MinQuorumSize())
		return errInsufficientSeals
	}
	err := blscrypto.VerifyAggregatedSignature(publicKeys, proposalSeal, []byte{}, aggregatedSeal.Signature, false)
	if err != nil {
		logger.Error("Unable to verify aggregated signature", "err", err)
		return errInvalidSignature
	}

	return nil
}

// VerifySeal checks whether the crypto seal on a header is valid according to
// the consensus rules of the given engine.
func (sb *Backend) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
	// Ensure the block number is greater than zero, but less than or equal to max uint64.
	if header.Number.Cmp(common.Big0) <= 0 || !header.Number.IsUint64() {
		return errUnknownBlock
	}

	extra, err := types.ExtractIstanbulExtra(header)
	if err != nil {
		return errInvalidExtraDataFormat
	}

	// Acquire the validator set whose signatures will be verified.
	// FIXME: Based on the current implementation of validator set construction, only validator sets
	// from the canonical chain will be used. This means that if the provided header is a valid
	// member of a non-canonical chain, seal verification will only succeed if the validator set
	// happens to be the same as the canonical chain at the same block number (as would be the case
	// for a fork from the canonical chain which does not cross an epoch boundary)
	valSet := sb.getValidators(header.Number.Uint64()-1, header.ParentHash)
	return sb.verifyAggregatedSeal(header.Hash(), valSet, extra.AggregatedSeal)
}

// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
func (sb *Backend) Prepare(chain consensus.ChainReader, header *types.Header) error {
	// Set the header fields that Istanbul fixes: the coinbase is the local validator
	// address, the nonce is empty, and the mix digest is the Istanbul digest.
	header.Coinbase = sb.address
	header.Nonce = emptyNonce
	header.MixDigest = types.IstanbulDigest

	// Retrieve the parent header; it is needed to derive this block's timestamp.
	number := header.Number.Uint64()
	parent := chain.GetHeader(header.ParentHash, number-1)
	if parent == nil {
		return consensus.ErrUnknownAncestor
	}
	// use the same difficulty for all blocks
	header.Difficulty = defaultDifficulty

	// set the header's timestamp
	header.Time = new(big.Int).Add(parent.Time, new(big.Int).SetUint64(sb.config.BlockPeriod))
	if header.Time.Int64() < time.Now().Unix() {
		header.Time = big.NewInt(time.Now().Unix())
	}

	if err := writeEmptyIstanbulExtra(header); err != nil {
		return err
	}

	// wait until the header's timestamp is reached; this enforces the block period
	delay := time.Unix(header.Time.Int64(), 0).Sub(now())
	time.Sleep(delay)

	return sb.addParentSeal(chain, header)
}

// UpdateValSetDiff will update the validator set diff in the header, if the mined header is the last block of the epoch
func (sb *Backend) UpdateValSetDiff(chain consensus.ChainReader, header *types.Header, state *state.StateDB) error {
	// If this is the last block of the epoch, then get the validator set diff, to save into the header
	log.Trace("Called UpdateValSetDiff", "number", header.Number.Uint64(), "epoch", sb.config.Epoch)
	if istanbul.IsLastBlockOfEpoch(header.Number.Uint64(), sb.config.Epoch) {
		newValSet, err := sb.getNewValidatorSet(header, state)
		if err == nil {
			// Get the last epoch's validator set
			snap, err := sb.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil)
			if err != nil {
				return err
			}

			// write the diff between the snapshot's validators and the new validator set into extraData
			return writeValidatorSetDiff(header, snap.validators(), newValSet)
		}
	}
	// If it's not the last block or we were unable to pull the new validator set, then the validator set diff should be empty
	return writeValidatorSetDiff(header, []istanbul.ValidatorData{}, []istanbul.ValidatorData{})
}

// IsLastBlockOfEpoch returns whether a particular header represents the last block in its epoch.
func (sb *Backend) IsLastBlockOfEpoch(header *types.Header) bool {
	return istanbul.IsLastBlockOfEpoch(header.Number.Uint64(), sb.config.Epoch)
}

// EpochSize returns the size of an epoch in blocks.
func (sb *Backend) EpochSize() uint64 {
	return sb.config.Epoch
}

// LookbackWindow returns the size of the lookback window for calculating uptime (in blocks).
func (sb *Backend) LookbackWindow() uint64 {
	return sb.config.LookbackWindow
}

// Finalize runs any post-transaction state modifications (e.g. block rewards)
// and assembles the final block.
//
// Note, the block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
func (sb *Backend) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, randomness *types.Randomness) (*types.Block, error) {
	start := time.Now()
	defer sb.finalizationTimer.UpdateSince(start)

	snapshot := state.Snapshot()
	err := sb.setInitialGoldTokenTotalSupplyIfUnset(header, state)
	if err != nil {
		state.RevertToSnapshot(snapshot)
	}

	// Trigger an update to the gas price minimum in the GasPriceMinimum contract based on block congestion
	snapshot = state.Snapshot()
	_, err = gpm.UpdateGasPriceMinimum(header, state)
	if err != nil {
		state.RevertToSnapshot(snapshot)
	}

	sb.logger.Trace("Finalizing", "block", header.Number.Uint64(), "epochSize", sb.config.Epoch)
	if istanbul.IsLastBlockOfEpoch(header.Number.Uint64(), sb.config.Epoch) {
		snapshot = state.Snapshot()
		err = sb.distributeEpochPaymentsAndRewards(header, state)
		if err != nil {
			state.RevertToSnapshot(snapshot)
		}
	}

	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
	header.UncleHash = nilUncleHash

	if len(state.GetLogs(common.Hash{})) > 0 {
		receipt := types.NewReceipt(nil, false, 0)
		receipt.Logs = state.GetLogs(common.Hash{})
		receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
		receipts = append(receipts, receipt)
	}

	// Assemble and return the final block for sealing
	return types.NewBlock(header, txs, nil, receipts, randomness), nil
}

// Seal generates a new block for the given input block with the local miner's
// seal placed on top.
func (sb *Backend) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
	// update the block header timestamp and signature and propose the block to the core engine
	header := block.Header()
	number := header.Number.Uint64()

	// Bail out if we're unauthorized to sign a block
	snap, err := sb.snapshot(chain, number-1, header.ParentHash, nil)
	if err != nil {
		return err
	}
	if _, v := snap.ValSet.GetByAddress(sb.address); v == nil {
		return errUnauthorized
	}

	parent := chain.GetHeader(header.ParentHash, number-1)
	if parent == nil {
		return consensus.ErrUnknownAncestor
	}
	block, err = sb.updateBlock(parent, block)
	if err != nil {
		return err
	}

	// remember the proposed block hash and clear it once sealing completes
	sb.sealMu.Lock()
	sb.proposedBlockHash = block.Hash()
	clear := func() {
		sb.proposedBlockHash = common.Hash{}
		sb.sealMu.Unlock()
	}

	// post the block into the Istanbul engine
	go sb.EventMux().Post(istanbul.RequestEvent{
		Proposal: block,
	})

	go func() {
		defer clear()
		for {
			select {
			case result := <-sb.commitCh:
				// The block `result` coming from commitCh can be nil. If the proposed block's
				// hash and the hash from the channel match, deliver the result. Otherwise,
				// keep waiting for the next block.
				if result != nil && block.Hash() == result.Hash() {
					results <- result
					return
				}
			case <-stop:
				return
			}
		}
	}()
	return nil
}
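
// Note: sealing is asynchronous. A rough sketch of how a caller (typically the miner)
// interacts with Seal, for illustration only:
//
//	stop := make(chan struct{})
//	results := make(chan *types.Block, 1)
//	_ = sb.Seal(chain, block, results, stop) // returns right after posting a RequestEvent
//	sealed := <-results                      // delivered once the core engine commits the block
//
// The sealed block delivered on the results channel is the one committed by consensus,
// carrying the aggregated commit seals.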

// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func (sb *Backend) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
	return defaultDifficulty
}

// SealHash returns the hash of a block prior to it being sealed.
func (sb *Backend) SealHash(header *types.Header) common.Hash {
	return sigHash(header)
}

// updateBlock signs the block's header and returns a new block with the proposer seal
// written into its extra-data.
func (sb *Backend) updateBlock(parent *types.Header, block *types.Block) (*types.Block, error) {
	header := block.Header()
	// sign the hash
	seal, err := sb.Sign(sigHash(header).Bytes())
	if err != nil {
		return nil, err
	}

	err = writeSeal(header, seal)
	if err != nil {
		return nil, err
	}

	return block.WithSeal(header), nil
}

// APIs returns the RPC APIs this consensus engine provides.
func (sb *Backend) APIs(chain consensus.ChainReader) []rpc.API {
	return []rpc.API{{
		Namespace: "istanbul",
		Version:   "1.0",
		Service:   &API{chain: chain, istanbul: sb},
		Public:    true,
	}}
}

// SetChain sets the chain reader and the function used to retrieve the current block.
func (sb *Backend) SetChain(chain consensus.ChainReader, currentBlock func() *types.Block) {
	sb.chain = chain
	sb.currentBlock = currentBlock
}

// Start implements consensus.Istanbul.Start
func (sb *Backend) Start(hasBadBlock func(common.Hash) bool,
	stateAt func(common.Hash) (*state.StateDB, error), processBlock func(*types.Block, *state.StateDB) (types.Receipts, []*types.Log, uint64, error),
	validateState func(*types.Block, *state.StateDB, types.Receipts, uint64) error) error {
	sb.coreMu.Lock()
	defer sb.coreMu.Unlock()
	if sb.coreStarted {
		return istanbul.ErrStartedEngine
	}

	// clear previous data
	sb.proposedBlockHash = common.Hash{}
	if sb.commitCh != nil {
		close(sb.commitCh)
	}
	sb.commitCh = make(chan *types.Block, 1)

	if sb.newEpochCh != nil {
		close(sb.newEpochCh)
	}
	sb.newEpochCh = make(chan struct{})

	sb.hasBadBlock = hasBadBlock
	sb.stateAt = stateAt
	sb.processBlock = processBlock
	sb.validateState = validateState

	sb.logger.Info("Starting istanbul.Engine")
	if err := sb.core.Start(); err != nil {
		return err
	}

	sb.coreStarted = true

	go sb.sendAnnounceMsgs()

	if sb.config.Proxied {
		if sb.config.ProxyInternalFacingNode != nil && sb.config.ProxyExternalFacingNode != nil {
			if err := sb.addProxy(sb.config.ProxyInternalFacingNode, sb.config.ProxyExternalFacingNode); err != nil {
				sb.logger.Error("Issue in adding proxy on istanbul start", "err", err)
			}
		}

		go sb.sendValEnodesShareMsgs()
	} else {
		headBlock := sb.GetCurrentHeadBlock()
		valset := sb.getValidators(headBlock.Number().Uint64(), headBlock.Hash())
		sb.RefreshValPeers(valset)
	}

	return nil
}

// Stop implements consensus.Istanbul.Stop
func (sb *Backend) Stop() error {
	sb.coreMu.Lock()
	defer sb.coreMu.Unlock()
	if !sb.coreStarted {
		return istanbul.ErrStoppedEngine
	}
	sb.logger.Info("Stopping istanbul.Engine")
	if err := sb.core.Stop(); err != nil {
		return err
	}
	sb.coreStarted = false

	sb.announceQuit <- struct{}{}
	sb.announceWg.Wait()

	if sb.config.Proxied {
		sb.valEnodesShareQuit <- struct{}{}
		sb.valEnodesShareWg.Wait()

		if sb.proxyNode != nil {
			sb.removeProxy(sb.proxyNode.node)
		}
	}
	return nil
}
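
// Note on the epoch arithmetic used by snapshot below (illustrative, assuming Epoch = 100):
// a request for number = 250 is first adjusted down to numberIter = 200, the last block of
// the previous epoch, since validator set diffs are only written into those headers (and
// into the genesis header). The snapshot derived at block 200 already describes the
// validator set in effect for the blocks that follow it, so for number = 250 no further
// epoch headers need to be applied.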

// snapshot retrieves the validator set needed to sign off on the block immediately
// after 'number'. E.g. if you need to find the validator set that needs to sign off
// on block 6, this method should be called with number set to 5.
//
// hash - The requested snapshot's block's hash. Only used for snapshot cache storage.
// number - The requested snapshot's block number.
// parents - (Optional argument) An array of headers from directly previous blocks.
func (sb *Backend) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
	// Search for a snapshot in memory or on disk
	var (
		headers []*types.Header
		header  *types.Header
		snap    *Snapshot
	)

	numberIter := number

	// If numberIter is not the last block of an epoch, then adjust it to be the last block of the previous epoch
	if !istanbul.IsLastBlockOfEpoch(numberIter, sb.config.Epoch) {
		epochNum := istanbul.GetEpochNumber(numberIter, sb.config.Epoch)
		numberIter = istanbul.GetEpochLastBlockNumber(epochNum-1, sb.config.Epoch)
	}

	// At this point, numberIter will always be the last block number of an epoch. Namely, it will be
	// a block number whose header contains the validator set diff.
	// Note that block 0 (the genesis block) is one of those headers. It contains the initial set of validators in the
	// 'addedValidators' field in the header.

	// Retrieve the most recent cached or on-disk snapshot.
	for ; ; numberIter = numberIter - sb.config.Epoch {
		// If an in-memory snapshot was found, use that
		if s, ok := sb.recentSnapshots.Get(numberIter); ok {
			snap = s.(*Snapshot)
			break
		}

		var blockHash common.Hash
		if numberIter == number && hash != (common.Hash{}) {
			blockHash = hash
		} else {
			header = chain.GetHeaderByNumber(numberIter)
			if header == nil {
				log.Trace("Unable to find header in chain", "number", number)
			} else {
				blockHash = chain.GetHeaderByNumber(numberIter).Hash()
			}
		}

		if (blockHash != common.Hash{}) {
			if s, err := loadSnapshot(sb.config.Epoch, sb.db, blockHash); err == nil {
				log.Trace("Loaded validator set snapshot from disk", "number", numberIter, "hash", blockHash)
				snap = s
				break
			}
		}

		if numberIter == 0 {
			break
		}

		// Panic if numberIter underflows (becomes greater than number).
		if numberIter > number {
			panic(fmt.Sprintf("There is a bug in the code. NumberIter underflowed, and should have stopped at 0. NumberIter: %v, number: %v", numberIter, number))
		}
	}

	// If the snapshot is still nil, then create a snapshot from the genesis block
	if snap == nil {
		log.Debug("Snapshot is nil, creating from genesis")
		// Panic if numberIter does not equal 0
		if numberIter != 0 {
			panic(fmt.Sprintf("There is a bug in the code. NumberIter should be 0. NumberIter: %v", numberIter))
		}

		genesis := chain.GetHeaderByNumber(0)

		istanbulExtra, err := types.ExtractIstanbulExtra(genesis)
		if err != nil {
			log.Error("Unable to extract istanbul extra", "err", err)
			return nil, err
		}

		// The genesis block should have an empty RemovedValidators set. If not, return an error.
		if istanbulExtra.RemovedValidators.BitLen() != 0 {
			log.Error("Genesis block has a non empty RemovedValidators set")
			return nil, errInvalidValidatorSetDiff
		}

		validators, err := istanbul.CombineIstanbulExtraToValidatorData(istanbulExtra.AddedValidators, istanbulExtra.AddedValidatorsPublicKeys)
		if err != nil {
			log.Error("Cannot construct validators data from istanbul extra")
			return nil, errInvalidValidatorSetDiff
		}
		snap = newSnapshot(sb.config.Epoch, 0, genesis.Hash(), validator.NewSet(validators))

		if err := snap.store(sb.db); err != nil {
			log.Error("Unable to store snapshot", "err", err)
			return nil, err
		}
	}

	log.Trace("Most recent snapshot found", "number", numberIter)
	// Calculate the returned snapshot by applying the epoch headers' val set diffs to the intermediate snapshot (the one that is retrieved/created above).
	// This involves retrieving all of those headers into an array, and then calling snapshot.apply on that array and the intermediate snapshot.
	// Note that the caller of this method may have passed in a set of previous headers, so we may be able to use some of them.
	for numberIter+sb.config.Epoch <= number {
		numberIter += sb.config.Epoch

		log.Trace("Retrieving ancestor header", "number", number, "numberIter", numberIter, "parents size", len(parents))
		inParents := -1
		for i := len(parents) - 1; i >= 0; i-- {
			if parents[i].Number.Uint64() == numberIter {
				inParents = i
				break
			}
		}
		if inParents >= 0 {
			header = parents[inParents]
			log.Trace("Retrieved header from parents param", "header num", header.Number.Uint64())
		} else {
			header = chain.GetHeaderByNumber(numberIter)
			if header == nil {
				log.Error("The header retrieved from the chain is nil", "block num", numberIter)
				return nil, errUnknownBlock
			}
		}

		headers = append(headers, header)
	}

	if len(headers) > 0 {
		var err error
		log.Trace("Snapshot headers len greater than 0", "headers", headers)
		snap, err = snap.apply(headers, sb.db)
		if err != nil {
			log.Error("Unable to apply headers to snapshots", "headers", headers)
			return nil, err
		}

		sb.recentSnapshots.Add(numberIter, snap)
	}
	// Make a copy of the snapshot to return, since a few fields will be modified.
	// The original snap is probably stored within the LRU cache, so we don't want to
	// modify that one.
	returnSnap := snap.copy()

	returnSnap.Number = number
	returnSnap.Hash = hash

	return returnSnap, nil
}

func (sb *Backend) addParentSeal(chain consensus.ChainReader, header *types.Header) error {
	number := header.Number.Uint64()
	logger := sb.logger.New("func", "Backend.addParentSeal()", "number", number)

	// Parent seals are only meaningful from block 2 onwards: block 1's parent is the
	// genesis block, which carries no seals.
	if number <= 1 {
		return nil
	}

	// Get the parent's extra to fetch its AggregatedSeal
	parent := chain.GetHeader(header.ParentHash, number-1)
	parentExtra, err := types.ExtractIstanbulExtra(parent)
	if err != nil {
		return err
	}

	createParentSeal := func() types.IstanbulAggregatedSeal {
		// In some cases, "addParentSeal" may be called before sb.core has moved to the next sequence,
		// preventing signature aggregation.
		// This typically happens in round > 0, since round 0 typically hits the "time.Sleep()"
		// above.
		// When this happens, loop until sb.core moves to the next sequence, with a limit of 500ms.
		seq := waitCoreToReachSequence(sb.core, header.Number)
		if seq == nil {
			return parentExtra.AggregatedSeal
		}

		logger = logger.New("parentAggregatedSeal", parentExtra.AggregatedSeal.String(), "cur_seq", seq)

		parentCommits := sb.core.ParentCommits()
		if parentCommits == nil || parentCommits.Size() == 0 {
			logger.Debug("No additional seals to combine with ParentAggregatedSeal")
			return parentExtra.AggregatedSeal
		}

		logger = logger.New("numParentCommits", parentCommits.Size())
		logger.Trace("Found commit messages from previous sequence to combine with ParentAggregatedSeal")

		// if we had any seals gossiped to us, proceed to add them to the
		// already aggregated signature
		unionAggregatedSeal, err := istanbulCore.UnionOfSeals(parentExtra.AggregatedSeal, parentCommits)
		if err != nil {
			logger.Error("Failed to combine commit messages with ParentAggregatedSeal", "err", err)
			return parentExtra.AggregatedSeal
		}

		// need to pass the previous block from the parent to get the parent's validators
		// (otherwise we'd be getting the validators for the current block)
		parentValidators := sb.getValidators(parent.Number.Uint64()-1, parent.ParentHash)
		// only update to use the union if we indeed provided a valid aggregate signature for this block
		if err := sb.verifyAggregatedSeal(parent.Hash(), parentValidators, unionAggregatedSeal); err != nil {
			logger.Error("Failed to verify combined ParentAggregatedSeal", "err", err)
			return parentExtra.AggregatedSeal
		}

		logger.Debug("Succeeded in verifying combined ParentAggregatedSeal", "combinedParentAggregatedSeal", unionAggregatedSeal.String())
		return unionAggregatedSeal
	}

	return writeAggregatedSeal(header, createParentSeal(), true)
}

// FIXME: Need to update this for Istanbul
// sigHash returns the hash which is used as input for the Istanbul
// signing. It is the hash of the entire header apart from the 65 byte signature
// contained at the end of the extra data.
//
// Note, the method requires the extra data to be at least 65 bytes, otherwise it
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
func sigHash(header *types.Header) (hash common.Hash) {
	hasher := sha3.NewLegacyKeccak256()

	// Clean seal is required for calculating proposer seal.
	rlp.Encode(hasher, types.IstanbulFilteredHeader(header, false))
	hasher.Sum(hash[:0])
	return hash
}
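
// Note: the proposer's seal recovered by ecrecover below is a signature over sigHash that
// resolves to a single validator address, whereas the commit seals carried in
// AggregatedSeal/ParentAggregatedSeal are aggregated BLS signatures checked in
// verifyAggregatedSeal.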

// ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header) (common.Address, error) {
	hash := header.Hash()
	if addr, ok := recentAddresses.Get(hash); ok {
		return addr.(common.Address), nil
	}

	// Retrieve the signature from the header extra-data
	istanbulExtra, err := types.ExtractIstanbulExtra(header)
	if err != nil {
		return common.Address{}, err
	}

	addr, err := istanbul.GetSignatureAddress(sigHash(header).Bytes(), istanbulExtra.Seal)
	if err != nil {
		return addr, err
	}
	recentAddresses.Add(hash, addr)
	return addr, nil
}

func writeEmptyIstanbulExtra(header *types.Header) error {
	extra := types.IstanbulExtra{
		AddedValidators:           []common.Address{},
		AddedValidatorsPublicKeys: []blscrypto.SerializedPublicKey{},
		RemovedValidators:         big.NewInt(0),
		Seal:                      []byte{},
		AggregatedSeal:            types.IstanbulAggregatedSeal{},
		ParentAggregatedSeal:      types.IstanbulAggregatedSeal{},
	}
	payload, err := rlp.EncodeToBytes(&extra)
	if err != nil {
		return err
	}

	if len(header.Extra) < types.IstanbulExtraVanity {
		header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, types.IstanbulExtraVanity-len(header.Extra))...)
	}
	header.Extra = append(header.Extra[:types.IstanbulExtraVanity], payload...)

	return nil
}

// writeValidatorSetDiff initializes the header's Extra field with any changes in the
// validator set that occurred since the last block
func writeValidatorSetDiff(header *types.Header, oldValSet []istanbul.ValidatorData, newValSet []istanbul.ValidatorData) error {
	// pad header.Extra with zero bytes if it is shorter than IstanbulExtraVanity bytes
	if len(header.Extra) < types.IstanbulExtraVanity {
		header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, types.IstanbulExtraVanity-len(header.Extra))...)
	}

	addedValidators, removedValidators := istanbul.ValidatorSetDiff(oldValSet, newValSet)
	addedValidatorsAddresses, addedValidatorsPublicKeys := istanbul.SeparateValidatorDataIntoIstanbulExtra(addedValidators)

	if len(addedValidators) > 0 || removedValidators.BitLen() > 0 {
		oldValidatorsAddresses, _ := istanbul.SeparateValidatorDataIntoIstanbulExtra(oldValSet)
		newValidatorsAddresses, _ := istanbul.SeparateValidatorDataIntoIstanbulExtra(newValSet)
		log.Debug("Setting istanbul header validator fields", "oldValSet", common.ConvertToStringSlice(oldValidatorsAddresses), "newValSet", common.ConvertToStringSlice(newValidatorsAddresses),
			"addedValidators", common.ConvertToStringSlice(addedValidatorsAddresses), "removedValidators", removedValidators.Text(16))
	}

	extra, err := types.ExtractIstanbulExtra(header)
	if err != nil {
		return err
	}

	extra.AddedValidators = addedValidatorsAddresses
	extra.AddedValidatorsPublicKeys = addedValidatorsPublicKeys
	extra.RemovedValidators = removedValidators

	// update the header's extra with the new diff
	payload, err := rlp.EncodeToBytes(extra)
	if err != nil {
		return err
	}
	header.Extra = append(header.Extra[:types.IstanbulExtraVanity], payload...)

	return nil
}
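
// Note: the helpers in this file that modify header.Extra all preserve the same layout:
// the first types.IstanbulExtraVanity bytes are vanity data, and the remainder is the RLP
// encoding of the full IstanbulExtra struct. writeSeal and writeAggregatedSeal below decode
// that struct, update a single field, re-encode it, and splice it back in after the vanity
// prefix.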

// writeSeal writes the extra-data field of the given header with the given seal.
func writeSeal(h *types.Header, seal []byte) error {
	if len(seal) != types.IstanbulExtraSeal {
		return errInvalidSignature
	}

	istanbulExtra, err := types.ExtractIstanbulExtra(h)
	if err != nil {
		return err
	}

	istanbulExtra.Seal = seal
	payload, err := rlp.EncodeToBytes(&istanbulExtra)
	if err != nil {
		return err
	}

	h.Extra = append(h.Extra[:types.IstanbulExtraVanity], payload...)
	return nil
}

// writeAggregatedSeal writes the extra-data field of a block header with the given committed
// seals. If isParent is set to true, then it will write to the fields related
// to the parent commits of the block.
func writeAggregatedSeal(h *types.Header, aggregatedSeal types.IstanbulAggregatedSeal, isParent bool) error {
	if len(aggregatedSeal.Signature) != types.IstanbulExtraBlsSignature {
		return errInvalidAggregatedSeal
	}

	istanbulExtra, err := types.ExtractIstanbulExtra(h)
	if err != nil {
		return err
	}

	if isParent {
		istanbulExtra.ParentAggregatedSeal = aggregatedSeal
	} else {
		istanbulExtra.AggregatedSeal = aggregatedSeal
	}

	payload, err := rlp.EncodeToBytes(&istanbulExtra)
	if err != nil {
		return err
	}

	// pad h.Extra with zero bytes if it is shorter than IstanbulExtraVanity bytes
	if len(h.Extra) < types.IstanbulExtraVanity {
		h.Extra = append(h.Extra, bytes.Repeat([]byte{0x00}, types.IstanbulExtraVanity-len(h.Extra))...)
	}

	h.Extra = append(h.Extra[:types.IstanbulExtraVanity], payload...)
	return nil
}

func waitCoreToReachSequence(core istanbulCore.Engine, expectedSequence *big.Int) *big.Int {
	logger := log.New("func", "waitCoreToReachSequence")
	timeout := time.After(500 * time.Millisecond)
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			view := core.CurrentView()
			if view != nil && view.Sequence != nil && view.Sequence.Cmp(expectedSequence) == 0 {
				logger.Trace("Current sequence matches header", "cur_seq", view.Sequence)
				return view.Sequence
			}
		case <-timeout:
			// TODO(asa): Why is this logged by full nodes?
			log.Trace("Timed out while waiting for core's sequence to change, unable to combine commit messages with ParentAggregatedSeal", "cur_view", core.CurrentView())
			return nil
		}
	}
}