gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/consensus/difficulty.go (about)

     1  package consensus
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/binary"
     6  	"math/big"
     7  
     8  	bolt "github.com/coreos/bbolt"
     9  	"gitlab.com/SiaPrime/SiaPrime/types"
    10  
    11  	"gitlab.com/NebulousLabs/errors"
    12  )
    13  
    14  // difficulty.go defines the Oak difficulty adjustment algorithm.
    15  //
    16  // A running tally is maintained which keeps the total difficulty and total time
    17  // passed across all blocks. The total difficulty can be divided by the total
    18  // time to get a hashrate. The total is multiplied by 0.995 each block, to keep
    19  // exponential preference on recent blocks with a half life of 144 data points.
    20  // This is about 24 hours. This estimated hashrate is assumed to closely match
    21  // the actual hashrate on the network.
    22  //
    23  // There is a target block time. If the difficulty increases or decreases, the
    24  // total amount of time that has passed will be more or less than the target
    25  // amount of time passed for the current height. To counteract this, the target
    26  // block time for each block is adjusted based on how far away from the desired
    27  // total time passed the current total time passed is. If the total time passed
    28  // is too low, blocks are targeted to be slightly longer, which helps to correct
    29  // the network. And if the total time passed is too high, blocks are targeted to
    30  // be slightly shorter, to help correct the network.
    31  //
    32  // High variance in block times means that the corrective action should not be
    33  // very strong if the total time passed has only missed the target time passed
    34  // by a few hours. But if the total time passed is significantly off, the block
    35  // time corrections should be much stronger. The square of the total deviation
    36  // is used to figure out what the adjustment should be. At 10,000 seconds
    37  // variance (about 3 hours), blocks will be adjusted by 10 seconds each. At
    38  // 20,000 seconds, blocks will be adjusted by 40 seconds each, a 4x adjustment
    39  // for 2x the error. And at 40,000 seconds, blocks will be adjusted by 160
    40  // seconds each, and so on.
    41  //
    42  // The total amount of blocktime adjustment is capped to 1/3 and 3x the target
    43  // blocktime, to prevent too much disruption on the network. If blocks are
    44  // actually coming out 3x as fast as intended, there will be a (temporary)
    45  // significant increase on the amount of strain on nodes to process blocks. And
    46  // at 1/3 the target blocktime, the total blockchain throughput will decrease
    47  // dramatically.
    48  //
    49  // Finally, one extra cap is applied to the difficulty adjustment - the
    50  // difficulty of finding a block is not allowed to change more than 0.4% every
    51  // block. This maps to a total possible difficulty change of 55x across 1008
    52  // blocks. This clamp helps to prevent wild swings when the hashrate increases
// or decreases rapidly on the network, and it also limits the amount of damage
    54  // that a malicious attacker can do if performing a difficulty raising attack.
    55  
// childTargetOak sets the child target based on the total time delta and total
// hashrate of the parent block. The deltas are known for the child block,
// however we do not use the child block deltas because that would allow the
// child block to influence the target of the following block, which makes abuse
// easier in selfish mining scenarios.
//
// The returned target is derived in three steps: compute a corrective target
// block time from the chain's drift versus the ideal schedule, estimate the
// network hashrate from the parent's running totals, then combine the two and
// clamp the result to a maximum per-block difficulty change.
func (cs *ConsensusSet) childTargetOak(parentTotalTime int64, parentTotalTarget, currentTarget types.Target, parentHeight types.BlockHeight, parentTimestamp types.Timestamp) types.Target {
	// Determine the delta of the current total time vs. the desired total time.
	// The desired total time is the difference between the genesis block
	// timestamp and the current block timestamp.
	var delta int64
	// This is the correct code. The expected time is an absolute time based
	// on the genesis block, and the delta is an absolute time based on the
	// timestamp of the parent block.
	//
	// Rules elsewhere in consensus ensure that the timestamp of the parent
	// block has not been manipulated by more than a few hours, which is
	// accurate enough for this logic to be safe.
	expectedTime := int64(types.BlockFrequency*parentHeight) + int64(types.GenesisTimestamp)
	delta = expectedTime - int64(parentTimestamp)

	// Convert the delta in to a target block time. Squaring the delta makes
	// the correction quadratic in the drift (see the file comment above):
	// small drift nudges block times gently, large drift corrects strongly.
	square := delta * delta
	if delta < 0 {
		// If the delta is negative, restore the negative value.
		square *= -1
	}
	// 10e6 is an integer-valued untyped constant (1e7), so this is exact
	// integer division.
	shift := square / 10e6 // 10e3 second delta leads to 10 second shift.
	targetBlockTime := int64(types.BlockFrequency) + shift

	// Clamp the block time to 1/3 and 3x the target block time.
	if targetBlockTime < int64(types.BlockFrequency)/types.OakMaxBlockShift {
		targetBlockTime = int64(types.BlockFrequency) / types.OakMaxBlockShift
	}
	if targetBlockTime > int64(types.BlockFrequency)*types.OakMaxBlockShift {
		targetBlockTime = int64(types.BlockFrequency) * types.OakMaxBlockShift
	}

	// Determine the hashrate using the total time and total target. Set a
	// minimum total time of 1 to prevent divide by zero and underflows.
	if parentTotalTime < 1 {
		parentTotalTime = 1
	}
	visibleHashrate := parentTotalTarget.Difficulty().Div64(uint64(parentTotalTime)) // Hashes per second.
	// Handle divide by zero risks.
	if visibleHashrate.IsZero() {
		visibleHashrate = visibleHashrate.Add(types.NewCurrency64(1))
	}
	if targetBlockTime == 0 {
		// This code can only possibly be triggered if the block frequency is
		// less than 3, but during testing the block frequency is 1.
		targetBlockTime = 1
	}

	// Determine the new target by multiplying the visible hashrate by the
	// target block time. Clamp it to a 0.4% difficulty adjustment.
	//
	// Note that a lower target means higher difficulty, so a Cmp result of
	// less-than against maxNewTarget means the difficulty rose too much. The
	// clamps are skipped for the block at the ASIC hardfork height, where the
	// target is reset.
	maxNewTarget := currentTarget.MulDifficulty(types.OakMaxRise) // Max = difficulty increase (target decrease)
	minNewTarget := currentTarget.MulDifficulty(types.OakMaxDrop) // Min = difficulty decrease (target increase)
	newTarget := types.RatToTarget(new(big.Rat).SetFrac(types.RootDepth.Int(), visibleHashrate.Mul64(uint64(targetBlockTime)).Big()))
	if newTarget.Cmp(maxNewTarget) < 0 && parentHeight+1 != types.ASICHardforkHeight {
		newTarget = maxNewTarget
	}
	if newTarget.Cmp(minNewTarget) > 0 && parentHeight+1 != types.ASICHardforkHeight {
		// This can only possibly trigger if the BlockFrequency is less than 3
		// seconds, but during testing it is 1 second.
		newTarget = minNewTarget
	}
	return newTarget
}
   124  
   125  // getBlockTotals returns the block totals values that get stored in
   126  // storeBlockTotals.
   127  func (cs *ConsensusSet) getBlockTotals(tx *bolt.Tx, id types.BlockID) (totalTime int64, totalTarget types.Target) {
   128  	totalsBytes := tx.Bucket(BucketOak).Get(id[:])
   129  	totalTime = int64(binary.LittleEndian.Uint64(totalsBytes[:8]))
   130  	copy(totalTarget[:], totalsBytes[8:])
   131  	return
   132  }
   133  
   134  // storeBlockTotals computes the new total time and total target for the current
   135  // block and stores that new time in the database. It also returns the new
   136  // totals.
   137  func (cs *ConsensusSet) storeBlockTotals(tx *bolt.Tx, currentHeight types.BlockHeight, currentBlockID types.BlockID, prevTotalTime int64, parentTimestamp, currentTimestamp types.Timestamp, prevTotalTarget, targetOfCurrentBlock types.Target) (newTotalTime int64, newTotalTarget types.Target, err error) {
   138  	// Reset the prevTotalTime to a delta of zero just before the hardfork.
   139  
   140  	// For each value, first multiply by the decay, and then add in the new
   141  	// delta.
   142  	newTotalTime = (prevTotalTime * types.OakDecayNum / types.OakDecayDenom) + (int64(currentTimestamp) - int64(parentTimestamp))
   143  	newTotalTarget = prevTotalTarget.MulDifficulty(big.NewRat(types.OakDecayNum, types.OakDecayDenom)).AddDifficulties(targetOfCurrentBlock)
   144  
   145  	// At the hardfork height to adjust the acceptable nonce conditions, reset
   146  	// the total time and total target.
   147  	if currentHeight+1 == types.ASICHardforkHeight {
   148  		newTotalTime = types.ASICHardforkTotalTime
   149  		newTotalTarget = types.ASICHardforkTotalTarget
   150  	}
   151  
   152  	// Store the new total time and total target in the database at the
   153  	// appropriate id.
   154  	bytes := make([]byte, 40)
   155  	binary.LittleEndian.PutUint64(bytes[:8], uint64(newTotalTime))
   156  	copy(bytes[8:], newTotalTarget[:])
   157  	err = tx.Bucket(BucketOak).Put(currentBlockID[:], bytes)
   158  	if err != nil {
   159  		return 0, types.Target{}, errors.Extend(errors.New("unable to store total time values"), err)
   160  	}
   161  	return newTotalTime, newTotalTarget, nil
   162  }
   163  
// initOak will initialize all of the oak difficulty adjustment related fields.
// This is separate from the initialization process for compatibility reasons -
// some databases will not have these fields at start, so it must be checked.
//
// After oak initialization is complete, a specific field in the oak bucket is
// marked so that oak initialization can be skipped in the future.
func (cs *ConsensusSet) initOak(tx *bolt.Tx) error {
	// Prep the oak bucket.
	bucketOak, err := tx.CreateBucketIfNotExists(BucketOak)
	if err != nil {
		return errors.Extend(errors.New("unable to create oak bucket"), err)
	}
	// Check whether the init field is set.
	if bytes.Equal(bucketOak.Get(FieldOakInit), ValueOakInit) {
		// The oak fields have been initialized, nothing to do.
		return nil
	}

	// Height of the current chain tip; totals must be rebuilt for every
	// block up to and including this height.
	height := blockHeight(tx)

	// Store base values for the genesis block. The genesis block has no
	// parent, so its own timestamp is used for both parent and current,
	// making its time delta zero.
	totalTime, totalTarget, err := cs.storeBlockTotals(tx, 0, types.GenesisID, 0, types.GenesisTimestamp, types.GenesisTimestamp, types.RootDepth, types.RootTarget)
	if err != nil {
		return errors.Extend(errors.New("unable to store genesis block totals"), err)
	}

	// The Oak fields have not been initialized, scan through the consensus set
	// and set the fields for each block. The totals for each block are
	// computed from the previous block's totals, the parent timestamp, and
	// the parent's child target, mirroring how they are maintained during
	// normal block acceptance.
	parentTimestamp := types.GenesisTimestamp
	parentChildTarget := types.RootTarget
	for i := types.BlockHeight(1); i <= height; i++ { // Skip Genesis block
		// Fetch the processed block for the current block.
		id, err := getPath(tx, i)
		if err != nil {
			return errors.Extend(errors.New("unable to find block at height"), err)
		}
		pb, err := getBlockMap(tx, id)
		if err != nil {
			return errors.Extend(errors.New("unable to find block from id"), err)
		}

		// Calculate and store the new block totals.
		totalTime, totalTarget, err = cs.storeBlockTotals(tx, i, id, totalTime, parentTimestamp, pb.Block.Timestamp, totalTarget, parentChildTarget)
		if err != nil {
			return errors.Extend(errors.New("unable to store updated block totals"), err)
		}
		// Update the previous values.
		parentTimestamp = pb.Block.Timestamp
		parentChildTarget = pb.ChildTarget
	}

	// Tag the initialization field in the oak bucket, indicating that
	// initialization has completed.
	err = bucketOak.Put(FieldOakInit, ValueOakInit)
	if err != nil {
		return errors.Extend(errors.New("unable to put oak init confirmation into oak bucket"), err)
	}
	return nil
}