github.com/dominant-strategies/go-quai@v0.28.2/consensus/progpow/consensus.go

package progpow

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"runtime"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/consensus"
	"github.com/dominant-strategies/go-quai/consensus/misc"
	"github.com/dominant-strategies/go-quai/core"
	"github.com/dominant-strategies/go-quai/core/state"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/log"
	"github.com/dominant-strategies/go-quai/params"
	"github.com/dominant-strategies/go-quai/trie"
	"modernc.org/mathutil"
)

// Progpow proof-of-work protocol constants.
var (
	maxUncles                     = 2         // Maximum number of uncles allowed in a single block
	allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks

	ContextTimeFactor = big10
	ZoneBlockReward   = big.NewInt(5e+18)
	RegionBlockReward = new(big.Int).Mul(ZoneBlockReward, big3)
	PrimeBlockReward  = new(big.Int).Mul(RegionBlockReward, big3)
)
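
// The block rewards above scale by a factor of three per context: a Region
// block pays three times the Zone reward and a Prime block pays three times
// the Region reward, i.e. nine times the Zone reward. rewardScaleSketch is a
// minimal illustrative sketch of that arithmetic using only the constants in
// this file; the helper name is illustrative and nothing in the engine calls it.
func rewardScaleSketch() (zone, region, prime *big.Int) {
	zone = new(big.Int).Set(ZoneBlockReward) // 5e18, i.e. 5 Quai
	region = new(big.Int).Mul(zone, big3)    // 3x the zone reward = 15e18
	prime = new(big.Int).Mul(region, big3)   // 9x the zone reward = 45e18
	return zone, region, prime
}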

// Frequently used big.Int constants, pre-allocated to avoid repeated memory allocations.
var (
	expDiffPeriod = big.NewInt(100000)
	big0          = big.NewInt(0)
	big1          = big.NewInt(1)
	big2          = big.NewInt(2)
	big3          = big.NewInt(3)
	big8          = big.NewInt(8)
	big9          = big.NewInt(9)
	big10         = big.NewInt(10)
	big32         = big.NewInt(32)
	bigMinus99    = big.NewInt(-99)
	big2e256      = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // 2^256
)

// Various error messages to mark blocks invalid. These should be private to
// prevent engine specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
	errOlderBlockTime      = errors.New("timestamp older than parent")
	errTooManyUncles       = errors.New("too many uncles")
	errDuplicateUncle      = errors.New("duplicate uncle")
	errUncleIsAncestor     = errors.New("uncle is ancestor")
	errDanglingUncle       = errors.New("uncle's parent is not ancestor")
	errInvalidDifficulty   = errors.New("non-positive difficulty")
	errDifficultyCrossover = errors.New("sub's difficulty exceeds dom's")
	errInvalidMixHash      = errors.New("invalid mixHash")
	errInvalidPoW          = errors.New("invalid proof-of-work")
	errInvalidOrder        = errors.New("invalid order")
)

// Author implements consensus.Engine, returning the header's coinbase as the
// proof-of-work verified author of the block.
func (progpow *Progpow) Author(header *types.Header) (common.Address, error) {
	return header.Coinbase(), nil
}

// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Quai progpow engine.
func (progpow *Progpow) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
	// If we're running a full engine faking, accept any input as valid
	if progpow.config.PowMode == ModeFullFake {
		return nil
	}
	// Short circuit if the header is known, or if its parent is not
	number := header.NumberU64()
	if chain.GetHeader(header.Hash(), number) != nil {
		return nil
	}
	parent := chain.GetHeader(header.ParentHash(), number-1)
	if parent == nil {
		return consensus.ErrUnknownAncestor
	}
	// Sanity checks passed, do a proper verification
	return progpow.verifyHeader(chain, header, parent, false, time.Now().Unix())
}

// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
func (progpow *Progpow) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
	// If we're running a full engine faking, accept any input as valid
	if progpow.config.PowMode == ModeFullFake || len(headers) == 0 {
		abort, results := make(chan struct{}), make(chan error, len(headers))
		for i := 0; i < len(headers); i++ {
			results <- nil
		}
		return abort, results
	}

	// Spawn as many workers as allowed threads
	workers := runtime.GOMAXPROCS(0)
	if len(headers) < workers {
		workers = len(headers)
	}

	// Create a task channel and spawn the verifiers
	var (
		inputs  = make(chan int)
		done    = make(chan int, workers)
		errors  = make([]error, len(headers))
		abort   = make(chan struct{})
		unixNow = time.Now().Unix()
	)
	for i := 0; i < workers; i++ {
		go func() {
			for index := range inputs {
				errors[index] = progpow.verifyHeaderWorker(chain, headers, index, unixNow)
				done <- index
			}
		}()
	}

	errorsOut := make(chan error, len(headers))
	go func() {
		defer close(inputs)
		var (
			in, out = 0, 0
			checked = make([]bool, len(headers))
			inputs  = inputs
		)
		for {
			select {
			case inputs <- in:
				if in++; in == len(headers) {
					// Reached end of headers. Stop sending to workers.
					inputs = nil
				}
			case index := <-done:
				for checked[index] = true; checked[out]; out++ {
					errorsOut <- errors[out]
					if out == len(headers)-1 {
						return
					}
				}
			case <-abort:
				return
			}
		}
	}()
	return abort, errorsOut
}
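
// verifyHeadersSketch is a minimal usage sketch for VerifyHeaders, showing the
// intended channel protocol: one result per header, delivered in header order,
// with the abort channel closed to stop outstanding verifications early. The
// helper name and its bail-on-first-error policy are illustrative assumptions,
// not part of the engine API.
func verifyHeadersSketch(engine *Progpow, chain consensus.ChainHeaderReader, headers []*types.Header) error {
	abort, results := engine.VerifyHeaders(chain, headers)
	defer close(abort) // signal the verification workers to stop once we return

	for range headers {
		if err := <-results; err != nil {
			return err // results arrive in order, so this is the earliest failing header
		}
	}
	return nil
}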

func (progpow *Progpow) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, index int, unixNow int64) error {
	var parent *types.Header
	if index == 0 {
		parent = chain.GetHeader(headers[0].ParentHash(), headers[0].NumberU64()-1)
	} else if headers[index-1].Hash() == headers[index].ParentHash() {
		parent = headers[index-1]
	}
	if parent == nil {
		return consensus.ErrUnknownAncestor
	}
	return progpow.verifyHeader(chain, headers[index], parent, false, unixNow)
}

// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of the stock Quai progpow engine.
func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
	// If we're running a full engine faking, accept any input as valid
	if progpow.config.PowMode == ModeFullFake {
		return nil
	}
	// Verify that there are at most 2 uncles included in this block
	if len(block.Uncles()) > maxUncles {
		return errTooManyUncles
	}
	if len(block.Uncles()) == 0 {
		return nil
	}
	// Gather the set of past uncles and ancestors
	uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.Header)

	number, parent := block.NumberU64()-1, block.ParentHash()
	for i := 0; i < 7; i++ {
		ancestorHeader := chain.GetHeader(parent, number)
		if ancestorHeader == nil {
			break
		}
		ancestors[parent] = ancestorHeader
		// If the ancestor doesn't have any uncles, we don't have to iterate them
		if ancestorHeader.UncleHash() != types.EmptyUncleHash {
			// Need to add those uncles to the banned list too
			ancestor := chain.GetBlock(parent, number)
			if ancestor == nil {
				break
			}
			for _, uncle := range ancestor.Uncles() {
				uncles.Add(uncle.Hash())
			}
		}
		parent, number = ancestorHeader.ParentHash(), number-1
	}
	ancestors[block.Hash()] = block.Header()
	uncles.Add(block.Hash())

	// Verify that each uncle is recent, but not an ancestor
	for _, uncle := range block.Uncles() {
		// Make sure every uncle is rewarded only once
		hash := uncle.Hash()
		if uncles.Contains(hash) {
			return errDuplicateUncle
		}
		uncles.Add(hash)

		// Make sure the uncle has a valid ancestry
		if ancestors[hash] != nil {
			return errUncleIsAncestor
		}
		if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash() {
			return errDanglingUncle
		}
		if err := progpow.verifyHeader(chain, uncle, ancestors[uncle.ParentHash()], true, time.Now().Unix()); err != nil {
			return err
		}
	}
	return nil
}

// verifyHeader checks whether a header conforms to the consensus rules
func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, unixNow int64) error {
	nodeCtx := common.NodeLocation.Context()
	// Ensure that the header's extra-data section is of a reasonable size
	if uint64(len(header.Extra())) > params.MaximumExtraDataSize {
		return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra()), params.MaximumExtraDataSize)
	}
	// Verify the header's timestamp
	if !uncle {
		if header.Time() > uint64(unixNow+allowedFutureBlockTimeSeconds) {
			return consensus.ErrFutureBlock
		}
	}
	if header.Time() < parent.Time() {
		return errOlderBlockTime
	}
	// Verify the block's difficulty based on its timestamp and parent's difficulty.
	// The difficulty adjustment can only be checked in the zone context.
	if nodeCtx == common.ZONE_CTX {
		expected := progpow.CalcDifficulty(chain, parent)
		if expected.Cmp(header.Difficulty()) != 0 {
			return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty(), expected)
		}
	}
	// Verify the engine specific seal securing the block
	_, order, err := progpow.CalcOrder(parent)
	if err != nil {
		return err
	}
	if order > nodeCtx {
		return fmt.Errorf("order of the block is greater than the context")
	}

	if !common.NodeLocation.InSameSliceAs(header.Location()) {
		return fmt.Errorf("block location is not in the same slice as the node location")
	}
	// Verify that the parent entropy is calculated correctly on the header
	parentEntropy := progpow.TotalLogS(parent)
	if parentEntropy.Cmp(header.ParentEntropy()) != 0 {
		return fmt.Errorf("invalid parent entropy: have %v, want %v", header.ParentEntropy(), parentEntropy)
	}
	// If not prime, verify the parentDeltaS field as well
	if nodeCtx > common.PRIME_CTX {
		_, parentOrder, _ := progpow.CalcOrder(parent)
		// If the parent was a dom block, parentDeltaS is zero; otherwise it is the delta s computed on the parent
		if parentOrder < nodeCtx {
			if common.Big0.Cmp(header.ParentDeltaS()) != 0 {
				return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), common.Big0)
			}
		} else {
			parentDeltaS := progpow.DeltaLogS(parent)
			if parentDeltaS.Cmp(header.ParentDeltaS()) != 0 {
				return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), parentDeltaS)
			}
		}
	}
	if nodeCtx == common.ZONE_CTX {
		// Check that the header coinbase is in scope
		_, err := header.Coinbase().InternalAddress()
		if err != nil {
			return fmt.Errorf("out-of-scope coinbase in the header")
		}
		// Verify that the gas limit is <= 2^63-1
		cap := uint64(0x7fffffffffffffff)
		if header.GasLimit() > cap {
			return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit(), cap)
		}
		// Verify that the gasUsed is <= gasLimit
		if header.GasUsed() > header.GasLimit() {
			return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed(), header.GasLimit())
		}
		// Verify the block's gas usage and verify the base fee.
		// Verify that the gas limit remains within allowed bounds
		expectedGasLimit := core.CalcGasLimit(parent, progpow.config.GasCeil)
		if expectedGasLimit != header.GasLimit() {
			return fmt.Errorf("invalid gasLimit: have %d, want %d",
				header.GasLimit(), expectedGasLimit)
		}
		// Verify the header is not malformed
		if header.BaseFee() == nil {
			return fmt.Errorf("header is missing baseFee")
		}
		// Verify the baseFee is correct based on the parent header.
		expectedBaseFee := misc.CalcBaseFee(chain.Config(), parent)
		if header.BaseFee().Cmp(expectedBaseFee) != 0 {
			return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d",
				header.BaseFee(), expectedBaseFee, parent.BaseFee(), parent.GasUsed())
		}
	}
	// Verify that the block number is parent's +1
	if diff := new(big.Int).Sub(header.Number(), parent.Number()); diff.Cmp(big.NewInt(1)) != 0 {
		return consensus.ErrInvalidNumber
	}
	return nil
}

// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have, given the parent block's
// time and difficulty.
func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.Header) *big.Int {
	nodeCtx := common.NodeLocation.Context()

	if nodeCtx != common.ZONE_CTX {
		log.Error("Cannot CalcDifficulty for", "context", nodeCtx)
		return nil
	}

	///// Algorithm:
	///// e = (DurationLimit - (parent.Time() - parentOfParent.Time())) * parent.Difficulty()
	///// k = Floor(BinaryLog(parent.Difficulty()))/(DurationLimit*DifficultyAdjustmentFactor*AdjustmentPeriod)
	///// Difficulty = Max(parent.Difficulty() + e * k, MinimumDifficulty)

	if parent.Hash() == chain.Config().GenesisHash {
		return parent.Difficulty()
	}
	parentOfParent := chain.GetHeaderByHash(parent.ParentHash())
	if parentOfParent == nil || parentOfParent.Hash() == chain.Config().GenesisHash {
		return parent.Difficulty()
	}

	time := parent.Time()
	bigTime := new(big.Int).SetUint64(time)
	bigParentTime := new(big.Int).SetUint64(parentOfParent.Time())

	// holds intermediate values to make the algo easier to read & audit
	x := new(big.Int)
	x.Sub(bigTime, bigParentTime)
	x.Sub(progpow.config.DurationLimit, x)
	x.Mul(x, parent.Difficulty())
	k, _ := mathutil.BinaryLog(new(big.Int).Set(parent.Difficulty()), 64)
	x.Mul(x, big.NewInt(int64(k)))
	x.Div(x, progpow.config.DurationLimit)
	x.Div(x, big.NewInt(params.DifficultyAdjustmentFactor))
	x.Div(x, params.DifficultyAdjustmentPeriod)
	x.Add(x, parent.Difficulty())

	// Enforce the minimum difficulty a block can ever have
	if x.Cmp(progpow.config.MinDifficulty) < 0 {
		x.Set(progpow.config.MinDifficulty)
	}
	return x
}
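
// difficultyStepSketch restates the adjustment above for a single step with
// explicit inputs, so the behaviour is easy to inspect in isolation. The
// helper and any parameter values a caller passes are illustrative
// assumptions; the engine itself reads DurationLimit, MinDifficulty and the
// params constants. For example, with durationLimit=10, adjustmentFactor=40,
// adjustmentPeriod=1, a parent difficulty of 2^20 (binary log k=20) and a 12s
// block gap, the delta is roughly (10-12)*D*20/(10*40*1) = -D/10, i.e. the
// difficulty drops by about ten percent.
func difficultyStepSketch(parentDiff *big.Int, gapSeconds, durationLimit, adjustmentFactor, adjustmentPeriod int64, minDifficulty *big.Int) *big.Int {
	x := new(big.Int).Sub(big.NewInt(durationLimit), big.NewInt(gapSeconds))
	x.Mul(x, parentDiff)
	k, _ := mathutil.BinaryLog(new(big.Int).Set(parentDiff), 64)
	x.Mul(x, big.NewInt(int64(k)))
	x.Div(x, big.NewInt(durationLimit))
	x.Div(x, big.NewInt(adjustmentFactor))
	x.Div(x, big.NewInt(adjustmentPeriod))
	x.Add(x, parentDiff)
	if x.Cmp(minDifficulty) < 0 {
		x.Set(minDifficulty)
	}
	return x
}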

func (progpow *Progpow) IsDomCoincident(chain consensus.ChainHeaderReader, header *types.Header) bool {
	_, order, err := progpow.CalcOrder(header)
	if err != nil {
		return false
	}
	return order < common.NodeLocation.Context()
}

func (progpow *Progpow) ComputePowLight(header *types.Header) (mixHash, powHash common.Hash) {
	powLight := func(size uint64, cache []uint32, hash []byte, nonce uint64, blockNumber uint64) ([]byte, []byte) {
		ethashCache := progpow.cache(blockNumber)
		if ethashCache.cDag == nil {
			cDag := make([]uint32, progpowCacheWords)
			generateCDag(cDag, ethashCache.cache, blockNumber/epochLength)
			ethashCache.cDag = cDag
		}
		return progpowLight(size, cache, hash, nonce, blockNumber, ethashCache.cDag)
	}
	cache := progpow.cache(header.NumberU64())
	size := datasetSize(header.NumberU64())
	digest, result := powLight(size, cache.cache, header.SealHash().Bytes(), header.NonceU64(), header.NumberU64(common.ZONE_CTX))
	mixHash = common.BytesToHash(digest)
	powHash = common.BytesToHash(result)
	header.PowDigest.Store(mixHash)
	header.PowHash.Store(powHash)

	// Caches are unmapped in a finalizer. Ensure that the cache stays alive
	// until after the call to progpowLight so it's not unmapped while being used.
	runtime.KeepAlive(cache)

	return mixHash, powHash
}

// VerifySeal returns the PowHash and the verifySeal output
func (progpow *Progpow) VerifySeal(header *types.Header) (common.Hash, error) {
	return progpow.verifySeal(header)
}

// verifySeal checks whether a block satisfies the PoW difficulty requirements,
// either using the usual progpow cache for it, or alternatively using a full DAG
// to make remote mining fast.
func (progpow *Progpow) verifySeal(header *types.Header) (common.Hash, error) {
	// If we're running a fake PoW, accept any seal as valid
	if progpow.config.PowMode == ModeFake || progpow.config.PowMode == ModeFullFake {
		time.Sleep(progpow.fakeDelay)
		if progpow.fakeFail == header.Number().Uint64() {
			return common.Hash{}, errInvalidPoW
		}
		return common.Hash{}, nil
	}
	// If we're running a shared PoW, delegate verification to it
	if progpow.shared != nil {
		return progpow.shared.verifySeal(header)
	}
	// Ensure that we have a valid difficulty for the block
	if header.Difficulty().Sign() <= 0 {
		return common.Hash{}, errInvalidDifficulty
	}
	// Check progpow
	mixHash := header.PowDigest.Load()
	powHash := header.PowHash.Load()
	if powHash == nil || mixHash == nil {
		mixHash, powHash = progpow.ComputePowLight(header)
	}
	// Verify the calculated values against the ones provided in the header
	if !bytes.Equal(header.MixHash().Bytes(), mixHash.(common.Hash).Bytes()) {
		return common.Hash{}, errInvalidMixHash
	}
	target := new(big.Int).Div(big2e256, header.Difficulty())
	if new(big.Int).SetBytes(powHash.(common.Hash).Bytes()).Cmp(target) > 0 {
		return powHash.(common.Hash), errInvalidPoW
	}
	return powHash.(common.Hash), nil
}
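
// The seal check above interprets the pow hash as a 256-bit integer and
// accepts it only if it does not exceed 2^256 / difficulty, so doubling the
// difficulty halves the space of acceptable hashes. powMeetsTarget is a
// standalone sketch of that comparison; the helper name is illustrative and
// the engine performs the check inline in verifySeal.
func powMeetsTarget(powHash common.Hash, difficulty *big.Int) bool {
	if difficulty.Sign() <= 0 {
		return false // mirrors errInvalidDifficulty: a non-positive difficulty can never be satisfied
	}
	target := new(big.Int).Div(big2e256, difficulty)
	return new(big.Int).SetBytes(powHash.Bytes()).Cmp(target) <= 0
}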

// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the progpow protocol. The changes are done inline.
func (progpow *Progpow) Prepare(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header) error {
	header.SetDifficulty(progpow.CalcDifficulty(chain, parent))
	return nil
}

// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
// setting the final state on the header
func (progpow *Progpow) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
	// Accumulate any block and uncle rewards and commit the final state root
	accumulateRewards(chain.Config(), state, header, uncles)

	if common.NodeLocation.Context() == common.ZONE_CTX && header.ParentHash() == chain.Config().GenesisHash {
		alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_" + common.NodeLocation.Name() + ".json")
		log.Info("Allocating genesis accounts", "num", len(alloc))

		for addressString, account := range alloc {
			addr := common.HexToAddress(addressString)
			internal, err := addr.InternalAddress()
			if err != nil {
				log.Error("Provided address in genesis block is out of scope")
			}
			state.AddBalance(internal, account.Balance)
			state.SetCode(internal, account.Code)
			state.SetNonce(internal, account.Nonce)
			for key, value := range account.Storage {
				state.SetState(internal, key, value)
			}
		}
	}

	header.SetRoot(state.IntermediateRoot(true))
}

// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
func (progpow *Progpow) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.Block, error) {
	nodeCtx := common.NodeLocation.Context()
	if nodeCtx == common.ZONE_CTX && chain.ProcessingState() {
		// Finalize block
		progpow.Finalize(chain, header, state, txs, uncles)
	}

	// Header seems complete, assemble into a block and return
	return types.NewBlock(header, txs, uncles, etxs, subManifest, receipts, trie.NewStackTrie(nil)), nil
}

// accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
	// Select the correct block reward based on chain progression
	blockReward := misc.CalculateReward(header)

	coinbase, err := header.Coinbase().InternalAddress()
	if err != nil {
		log.Error("Block has out-of-scope coinbase, skipping block reward: " + header.Hash().String())
		return
	}

	// Accumulate the rewards for the miner and any included uncles
	reward := new(big.Int).Set(blockReward)
	r := new(big.Int)
	for _, uncle := range uncles {
		coinbase, err := uncle.Coinbase().InternalAddress()
		if err != nil {
			log.Error("Found uncle with out-of-scope coinbase, skipping reward: " + uncle.Hash().String())
			continue
		}
		r.Add(uncle.Number(), big8)
		r.Sub(r, header.Number())
		r.Mul(r, blockReward)
		r.Div(r, big8)
		state.AddBalance(coinbase, r)

		r.Div(blockReward, big32)
		reward.Add(reward, r)
	}
	state.AddBalance(coinbase, reward)
}
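
// The uncle loop above pays each uncle (uncleNumber + 8 - blockNumber) / 8 of
// the static reward, so an uncle one block behind the including block earns
// 7/8 of the reward and one seven blocks behind earns 1/8, while the including
// miner collects an additional 1/32 of the reward per uncle. uncleRewardSketch
// is a standalone restatement of that arithmetic; the helper name is
// illustrative and the engine computes the values inline above.
func uncleRewardSketch(blockReward, blockNumber, uncleNumber *big.Int) (uncleReward, minerBonus *big.Int) {
	uncleReward = new(big.Int).Add(uncleNumber, big8)
	uncleReward.Sub(uncleReward, blockNumber)
	uncleReward.Mul(uncleReward, blockReward)
	uncleReward.Div(uncleReward, big8)
	minerBonus = new(big.Int).Div(blockReward, big32)
	return uncleReward, minerBonus
}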