github.com/klaytn/klaytn@v1.12.1/consensus/gxhash/consensus.go

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2017 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from consensus/ethash/consensus.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package gxhash
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"math/big"
    27  	"runtime"
    28  	"time"
    29  
    30  	"github.com/klaytn/klaytn/blockchain/state"
    31  	"github.com/klaytn/klaytn/blockchain/types"
    32  	"github.com/klaytn/klaytn/common"
    33  	"github.com/klaytn/klaytn/consensus"
    34  	"github.com/klaytn/klaytn/params"
    35  )
    36  
    37  var (
     38  	ByzantiumBlockReward   *big.Int = big.NewInt(3e+18) // Block reward in peb for successfully mining a block, from Byzantium onward
    39  	allowedFutureBlockTime          = 15 * time.Second  // Max time from current time allowed for blocks, before they're considered future blocks
    40  )
    41  
    42  // Various error messages to mark blocks invalid. These should be private to
     43  // prevent engine-specific errors from being referenced in the remainder of
     44  // the codebase, which would inherently break if the engine is swapped out.
     45  // Please put common error types into the consensus package.
    46  var (
    47  	errLargeBlockTime    = errors.New("timestamp too big")
    48  	errZeroBlockTime     = errors.New("timestamp equals parent's")
    49  	errInvalidBlockScore = errors.New("non-positive blockScore")
    50  	errInvalidPoW        = errors.New("invalid proof-of-work")
    51  )
    52  
    53  // Author implements consensus.Engine, returning the header's coinbase as the
    54  // proof-of-work verified author of the block.
    55  func (gxhash *Gxhash) Author(header *types.Header) (common.Address, error) {
     56  	// Returns an arbitrary address because gxhash is used only for testing
    57  	return params.AuthorAddressForTesting, nil
    58  }
    59  
     60  // CanVerifyHeadersConcurrently returns true if concurrent header verification is possible, otherwise false.
    61  func (gxhash *Gxhash) CanVerifyHeadersConcurrently() bool {
    62  	return true
    63  }
    64  
     65  // PreprocessHeaderVerification performs the heavy computation, such as ecrecover, needed before synchronous header verification.
    66  func (gxhash *Gxhash) PreprocessHeaderVerification(headers []*types.Header) (chan<- struct{}, <-chan error) {
    67  	panic("this method is not used for PoW engine")
    68  }
    69  
    70  // CreateSnapshot is not used for PoW engine.
    71  func (gxhash *Gxhash) CreateSnapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) error {
    72  	return nil
    73  }
    74  
    75  // GetConsensusInfo is not used for PoW engine.
    76  func (gxhash *Gxhash) GetConsensusInfo(block *types.Block) (consensus.ConsensusInfo, error) {
    77  	return consensus.ConsensusInfo{}, nil
    78  }
    79  
    80  // VerifyHeader checks whether a header conforms to the consensus rules of the
    81  // stock Klaytn gxhash engine.
    82  func (gxhash *Gxhash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
    83  	// If we're running a full engine faking, accept any input as valid
    84  	if gxhash.config.PowMode == ModeFullFake {
    85  		return nil
    86  	}
     87  	// Short circuit if the header is known, or its parent is not
    88  	number := header.Number.Uint64()
    89  	if chain.GetHeader(header.Hash(), number) != nil {
    90  		return nil
    91  	}
    92  	parent := chain.GetHeader(header.ParentHash, number-1)
    93  	if parent == nil {
    94  		return consensus.ErrUnknownAncestor
    95  	}
    96  	// Sanity checks passed, do a proper verification
    97  	return gxhash.verifyHeader(chain, header, parent, seal)
    98  }
    99  
   100  // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
   101  // concurrently. The method returns a quit channel to abort the operations and
   102  // a results channel to retrieve the async verifications.
   103  func (gxhash *Gxhash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
   104  	// If we're running a full engine faking, accept any input as valid
   105  	if gxhash.config.PowMode == ModeFullFake || len(headers) == 0 {
   106  		abort, results := make(chan struct{}), make(chan error, len(headers))
   107  		for i := 0; i < len(headers); i++ {
   108  			results <- nil
   109  		}
   110  		return abort, results
   111  	}
   112  
   113  	// Spawn as many workers as allowed threads
   114  	workers := runtime.GOMAXPROCS(0)
   115  	if len(headers) < workers {
   116  		workers = len(headers)
   117  	}
   118  
   119  	// Create a task channel and spawn the verifiers
   120  	var (
   121  		inputs = make(chan int)
   122  		done   = make(chan int, workers)
   123  		errors = make([]error, len(headers))
   124  		abort  = make(chan struct{})
   125  	)
   126  	for i := 0; i < workers; i++ {
   127  		go func() {
   128  			for index := range inputs {
   129  				errors[index] = gxhash.verifyHeaderWorker(chain, headers, seals, index)
   130  				done <- index
   131  			}
   132  		}()
   133  	}
   134  
   135  	errorsOut := make(chan error, len(headers))
   136  	go func() {
   137  		defer close(inputs)
   138  		var (
   139  			in, out = 0, 0
   140  			checked = make([]bool, len(headers))
   141  			inputs  = inputs
   142  		)
   143  		for {
   144  			select {
   145  			case inputs <- in:
   146  				if in++; in == len(headers) {
   147  					// Reached end of headers. Stop sending to workers.
   148  					inputs = nil
   149  				}
   150  			case index := <-done:
   151  				for checked[index] = true; checked[out]; out++ {
   152  					errorsOut <- errors[out]
   153  					if out == len(headers)-1 {
   154  						return
   155  					}
   156  				}
   157  			case <-abort:
   158  				return
   159  			}
   160  		}
   161  	}()
   162  	return abort, errorsOut
   163  }
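
// verifyHeadersUsageSketch is a hypothetical helper, shown only to illustrate how a
// caller might drive VerifyHeaders (the helper name and the seal policy are assumptions,
// not guaranteed by this file): drain one result per submitted header and close the
// returned quit channel to abort any outstanding verifications once an error is seen.
func verifyHeadersUsageSketch(gxhash *Gxhash, chain consensus.ChainReader, headers []*types.Header) error {
	seals := make([]bool, len(headers))
	for i := range seals {
		seals[i] = true // verify every seal in this sketch
	}
	abort, results := gxhash.VerifyHeaders(chain, headers, seals)
	defer close(abort) // closing the quit channel stops the dispatcher and its workers

	for range headers {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}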
   164  
   165  func (gxhash *Gxhash) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error {
   166  	var parent *types.Header
   167  	if index == 0 {
   168  		parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
   169  	} else if headers[index-1].Hash() == headers[index].ParentHash {
   170  		parent = headers[index-1]
   171  	}
   172  	if parent == nil {
   173  		return consensus.ErrUnknownAncestor
   174  	}
   175  	if chain.GetHeader(headers[index].Hash(), headers[index].Number.Uint64()) != nil {
   176  		return nil // known block
   177  	}
   178  	return gxhash.verifyHeader(chain, headers[index], parent, seals[index])
   179  }
   180  
   181  // verifyHeader checks whether a header conforms to the consensus rules of the
   182  // stock Klaytn gxhash engine.
   183  // See YP section 4.3.4. "Block Header Validity"
   184  func (gxhash *Gxhash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, seal bool) error {
   185  	// Ensure that the header's extra-data section is of a reasonable size
   186  	if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
   187  		return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
   188  	}
   189  
   190  	// Verify the header's timestamp
   191  	if header.Time.Cmp(big.NewInt(time.Now().Add(allowedFutureBlockTime).Unix())) > 0 {
   192  		return consensus.ErrFutureBlock
   193  	}
   194  	if header.Time.Cmp(parent.Time) <= 0 {
   195  		return errZeroBlockTime
   196  	}
    197  	// Verify the block's blockscore based on its timestamp and the parent's blockscore
   198  	expected := gxhash.CalcBlockScore(chain, header.Time.Uint64(), parent)
   199  
   200  	if expected.Cmp(header.BlockScore) != 0 {
   201  		return fmt.Errorf("invalid blockscore: have %v, want %v", header.BlockScore, expected)
   202  	}
   203  	// Verify that the block number is parent's +1
   204  	if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(common.Big1) != 0 {
   205  		return consensus.ErrInvalidNumber
   206  	}
   207  	// Verify the engine specific seal securing the block
   208  	if seal {
   209  		if err := gxhash.VerifySeal(chain, header); err != nil {
   210  			return err
   211  		}
   212  	}
   213  	return nil
   214  }
   215  
   216  // CalcBlockScore is the blockscore adjustment algorithm. It returns
   217  // the blockscore that a new block should have when created at time
   218  // given the parent block's time and blockscore.
   219  func (gxhash *Gxhash) CalcBlockScore(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
   220  	return CalcBlockScore(chain.Config(), time, parent)
   221  }
   222  
   223  // CalcBlockScore is the blockscore adjustment algorithm. It returns
   224  // the blockscore that a new block should have when created at time
   225  // given the parent block's time and blockscore.
   226  func CalcBlockScore(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
   227  	return calcBlockScoreByzantium(time, parent)
   228  }
   229  
   230  // Some weird constants to avoid constant memory allocs for them.
   231  var (
   232  	expDiffPeriod = big.NewInt(100000)
   233  	big1          = big.NewInt(1)
   234  	big2          = big.NewInt(2)
   235  	big9          = big.NewInt(9)
   236  	big10         = big.NewInt(10)
   237  	bigMinus99    = big.NewInt(-99)
   238  	big2999999    = big.NewInt(2999999)
   239  )
   240  
   241  // calcBlockScoreByzantium is the blockscore adjustment algorithm. It returns
   242  // the blockscore that a new block should have when created at time given the
   243  // parent block's time and blockscore. The calculation uses the Byzantium rules.
   244  func calcBlockScoreByzantium(time uint64, parent *types.Header) *big.Int {
   245  	// https://github.com/ethereum/EIPs/issues/100.
   246  	// algorithm:
   247  	// diff = (parent_diff +
   248  	//         (parent_diff / 2048 * max((1) - ((timestamp - parent.timestamp) // 9), -99))
   249  	//        ) + 2^(periodCount - 2)
   250  
   251  	bigTime := new(big.Int).SetUint64(time)
   252  	bigParentTime := new(big.Int).Set(parent.Time)
   253  
   254  	// holds intermediate values to make the algo easier to read & audit
   255  	x := new(big.Int)
   256  	y := new(big.Int)
   257  
   258  	// (1) - (block_timestamp - parent_timestamp) // 9
   259  	x.Sub(bigTime, bigParentTime)
   260  	x.Div(x, big9)
   261  	x.Sub(big1, x)
   262  
   263  	// max((1) - (block_timestamp - parent_timestamp) // 9, -99)
   264  	if x.Cmp(bigMinus99) < 0 {
   265  		x.Set(bigMinus99)
   266  	}
   267  	// parent_diff + (parent_diff / 2048 * max((1) - ((timestamp - parent.timestamp) // 9), -99))
   268  	y.Div(parent.BlockScore, params.BlockScoreBoundDivisor)
   269  	x.Mul(y, x)
   270  	x.Add(parent.BlockScore, x)
   271  
   272  	// minimum blockscore can ever be (before exponential factor)
   273  	if x.Cmp(params.MinimumBlockScore) < 0 {
   274  		x.Set(params.MinimumBlockScore)
   275  	}
   276  	// calculate a fake block number for the ice-age delay:
   277  	//   https://github.com/ethereum/EIPs/pull/669
    278  	//   fake_block_number = max(0, block.number - 3_000_000)
   279  	fakeBlockNumber := new(big.Int)
   280  	if parent.Number.Cmp(big2999999) >= 0 {
   281  		fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, big2999999) // Note, parent is 1 less than the actual block number
   282  	}
   283  	// for the exponential factor
   284  	periodCount := fakeBlockNumber
   285  	periodCount.Div(periodCount, expDiffPeriod)
   286  
   287  	// the exponential factor, commonly referred to as "the bomb"
   288  	// diff = diff + 2^(periodCount - 2)
   289  	if periodCount.Cmp(big1) > 0 {
   290  		y.Sub(periodCount, big2)
   291  		y.Exp(big2, y, nil)
   292  		x.Add(x, y)
   293  	}
   294  	return x
   295  }
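
// blockScoreByzantiumSketch is a hypothetical, illustrative snippet (the helper name and
// the sample values are made up) showing how calcBlockScoreByzantium reacts to the
// parent/child time gap: a gap under 9 seconds adds parent_score/2048, a larger gap
// subtracts multiples of it, bounded below by the -99 factor and params.MinimumBlockScore.
// With a parent number this small the ice-age "bomb" term stays zero.
func blockScoreByzantiumSketch() {
	parent := &types.Header{
		Number:     big.NewInt(1000),
		Time:       big.NewInt(1600000000),
		BlockScore: big.NewInt(1000000),
	}
	fast := calcBlockScoreByzantium(1600000005, parent) // 5s gap:  adjustment is +parent_score/2048
	slow := calcBlockScoreByzantium(1600000030, parent) // 30s gap: adjustment is -2*parent_score/2048 (before the minimum floor)
	fmt.Println("5s gap:", fast, "30s gap:", slow)
}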
   296  
   297  // calcBlockScoreHomestead is the blockscore adjustment algorithm. It returns
   298  // the blockscore that a new block should have when created at time given the
   299  // parent block's time and blockscore. The calculation uses the Homestead rules.
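// Note: CalcBlockScore above always delegates to calcBlockScoreByzantium, so this Homestead variant is not used by that path.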
   300  func calcBlockScoreHomestead(time uint64, parent *types.Header) *big.Int {
   301  	// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md
   302  	// algorithm:
   303  	// diff = (parent_diff +
   304  	//         (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
   305  	//        ) + 2^(periodCount - 2)
   306  
   307  	bigTime := new(big.Int).SetUint64(time)
   308  	bigParentTime := new(big.Int).Set(parent.Time)
   309  
   310  	// holds intermediate values to make the algo easier to read & audit
   311  	x := new(big.Int)
   312  	y := new(big.Int)
   313  
   314  	// 1 - (block_timestamp - parent_timestamp) // 10
   315  	x.Sub(bigTime, bigParentTime)
   316  	x.Div(x, big10)
   317  	x.Sub(big1, x)
   318  
   319  	// max(1 - (block_timestamp - parent_timestamp) // 10, -99)
   320  	if x.Cmp(bigMinus99) < 0 {
   321  		x.Set(bigMinus99)
   322  	}
   323  	// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
   324  	y.Div(parent.BlockScore, params.BlockScoreBoundDivisor)
   325  	x.Mul(y, x)
   326  	x.Add(parent.BlockScore, x)
   327  
   328  	// minimum blockscore can ever be (before exponential factor)
   329  	if x.Cmp(params.MinimumBlockScore) < 0 {
   330  		x.Set(params.MinimumBlockScore)
   331  	}
   332  	// for the exponential factor
   333  	periodCount := new(big.Int).Add(parent.Number, big1)
   334  	periodCount.Div(periodCount, expDiffPeriod)
   335  
   336  	// the exponential factor, commonly referred to as "the bomb"
   337  	// diff = diff + 2^(periodCount - 2)
   338  	if periodCount.Cmp(big1) > 0 {
   339  		y.Sub(periodCount, big2)
   340  		y.Exp(big2, y, nil)
   341  		x.Add(x, y)
   342  	}
   343  	return x
   344  }
   345  
   346  func (gxhash *Gxhash) InitSnapshot() {}
   347  
   348  // VerifySeal implements consensus.Engine, checking whether the given block satisfies
   349  // the PoW blockscore requirements.
   350  func (gxhash *Gxhash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
   351  	// If we're running a fake PoW, accept any seal as valid
   352  	if gxhash.config.PowMode == ModeFake || gxhash.config.PowMode == ModeFullFake {
   353  		time.Sleep(gxhash.fakeDelay)
   354  		if gxhash.fakeFail == header.Number.Uint64() {
   355  			return errInvalidPoW
   356  		}
   357  		return nil
   358  	}
   359  	// If we're running a shared PoW, delegate verification to it
   360  	if gxhash.shared != nil {
   361  		return gxhash.shared.VerifySeal(chain, header)
   362  	}
   363  	// Ensure that we have a valid blockscore for the block
   364  	if header.BlockScore.Sign() <= 0 {
   365  		return errInvalidBlockScore
   366  	}
    367  	// Recompute the digest and PoW value and verify against the header (the actual check is currently commented out below)
   368  	number := header.Number.Uint64()
   369  
   370  	cache := gxhash.cache(number)
   371  	//size := datasetSize(number)
   372  	//if gxhash.config.PowMode == ModeTest {
   373  	//	size = 32 * 1024
   374  	//}
   375  	//digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), 0)
   376  	// Caches are unmapped in a finalizer. Ensure that the cache stays live
   377  	// until after the call to hashimotoLight so it's not unmapped while being used.
   378  	runtime.KeepAlive(cache)
   379  
   380  	//target := new(big.Int).Div(maxUint256, header.blockscore)
   381  	//if new(big.Int).SetBytes(result).Cmp(target) > 0 {
   382  	//	return errInvalidPoW
   383  	//}
   384  	return nil
   385  }
   386  
   387  // Prepare implements consensus.Engine, initializing the blockscore field of a
   388  // header to conform to the gxhash protocol. The changes are done inline.
   389  func (gxhash *Gxhash) Prepare(chain consensus.ChainReader, header *types.Header) error {
   390  	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
   391  	if parent == nil {
   392  		return consensus.ErrUnknownAncestor
   393  	}
   394  	header.BlockScore = gxhash.CalcBlockScore(chain, header.Time.Uint64(), parent)
   395  	return nil
   396  }
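
// prepareUsageSketch is a hypothetical helper, shown only to illustrate the Prepare flow
// (the helper name and the header fields chosen here are assumptions): build a child
// header for a parent already stored in the chain and let Prepare fill in its BlockScore.
func prepareUsageSketch(gxhash *Gxhash, chain consensus.ChainReader, parent *types.Header) (*types.Header, error) {
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     new(big.Int).Add(parent.Number, common.Big1),
		Time:       big.NewInt(time.Now().Unix()),
	}
	// Prepare looks the parent up via chain.GetHeader and sets header.BlockScore.
	if err := gxhash.Prepare(chain, header); err != nil {
		return nil, err
	}
	return header, nil
}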
   397  
   398  // Finalize implements consensus.Engine, accumulating the block rewards,
   399  // setting the final state and assembling the block.
   400  func (gxhash *Gxhash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) {
   401  	// Accumulate any block rewards and commit the final state root
   402  	accumulateRewards(chain.Config(), state, header)
   403  	header.Root = state.IntermediateRoot(true)
   404  
   405  	// Header seems complete, assemble into a block and return
   406  	return types.NewBlock(header, txs, receipts), nil
   407  }
   408  
   409  // Some weird constants to avoid constant memory allocs for them.
   410  var (
   411  	big8  = big.NewInt(8)
   412  	big32 = big.NewInt(32)
   413  )
   414  
    415  // accumulateRewards credits params.AuthorAddressForTesting with the mining
    416  // reward. The total reward consists of the static block reward.
   417  func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header) {
    418  	// Use the static Byzantium block reward
   419  	blockReward := ByzantiumBlockReward
   420  
   421  	// Accumulate the rewards for the miner
   422  	reward := new(big.Int).Set(blockReward)
   423  
   424  	state.AddBalance(params.AuthorAddressForTesting, reward)
   425  }