github.com/NebulousLabs/Sia@v1.3.7/modules/consensus/difficulty.go

package consensus

import (
	"bytes"
	"encoding/binary"
	"math/big"

	"github.com/NebulousLabs/Sia/types"

	"github.com/NebulousLabs/errors"
	"github.com/coreos/bbolt"
)

// Errors returned by this file.
var (
	// errOakHardforkIncompatibility is the error returned if Oak initialization
	// cannot begin because the consensus database was not upgraded before the
	// hardfork height.
	errOakHardforkIncompatibility = errors.New("difficulty adjustment hardfork incompatibility detected")
)

// difficulty.go defines the Oak difficulty adjustment algorithm. Past the
// hardfork trigger height, it is the algorithm that Sia uses to adjust the
// difficulty.
//
// A running tally is maintained which keeps the total difficulty and total time
// passed across all blocks. The total difficulty can be divided by the total
// time to get a hashrate. Each total is multiplied by 0.995 every block, to
// keep an exponential preference for recent blocks with a half-life of 144
// data points, which is about 24 hours. This estimated hashrate is assumed to
// closely match the actual hashrate on the network.
//
// There is a target block time. If the difficulty increases or decreases, the
// total amount of time that has passed will be more or less than the target
// amount of time passed for the current height. To counteract this, the target
// block time for each block is adjusted based on how far the current total
// time passed is from the desired total time passed. If the total time passed
// is too low, blocks are targeted to be slightly longer; if the total time
// passed is too high, blocks are targeted to be slightly shorter. Both help to
// correct the network.
//
// High variance in block times means that the corrective action should not be
// very strong if the total time passed has only missed the target time passed
// by a few hours. But if the total time passed is significantly off, the block
// time corrections should be much stronger. The square of the total deviation
// is used to figure out what the adjustment should be. At a 10,000 second
// deviation (about 3 hours), blocks will be adjusted by 10 seconds each. At
// 20,000 seconds, blocks will be adjusted by 40 seconds each, a 4x adjustment
// for 2x the error. And at 40,000 seconds, blocks will be adjusted by 160
// seconds each, and so on.
//
// The total amount of blocktime adjustment is capped to between 1/3 and 3x the
// target blocktime, to prevent too much disruption on the network. If blocks
// are actually coming out 3x as fast as intended, there will be a (temporary)
// significant increase in the strain on nodes processing blocks. And at 1/3
// the target blocktime, the total blockchain throughput will decrease
// dramatically.
//
// Finally, one extra cap is applied to the difficulty adjustment - the
// difficulty of finding a block is not allowed to change more than 0.4% every
// block. This maps to a total possible difficulty change of 55x across 1008
// blocks. This clamp helps to prevent wild swings when the hashrate increases
// or decreases rapidly on the network, and it also limits the amount of damage
// that a malicious attacker can do if performing a difficulty raising attack.
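
// exampleOakShift is a minimal sketch of the quadratic correction described
// above. It is a hypothetical helper included purely for illustration and is
// not used by the consensus code; childTargetOak below performs the same
// arithmetic inline. A 10e3 second delta yields a 10 second shift, a 20e3
// second delta yields a 40 second shift, and a 40e3 second delta yields a 160
// second shift.
func exampleOakShift(delta int64) int64 {
	// Square the delta, preserving its sign so that negative deltas shorten
	// the target block time rather than lengthening it.
	square := delta * delta
	if delta < 0 {
		square *= -1
	}
	return square / 10e6
}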

// childTargetOak sets the child target based on the total time delta and total
// hashrate of the parent block. The deltas are known for the child block,
// however we do not use the child block deltas because that would allow the
// child block to influence the target of the following block, which makes abuse
// easier in selfish mining scenarios.
func (cs *ConsensusSet) childTargetOak(parentTotalTime int64, parentTotalTarget, currentTarget types.Target, parentHeight types.BlockHeight, parentTimestamp types.Timestamp) types.Target {
	// Determine the delta of the current total time vs. the desired total time.
	// The desired total time is the difference between the genesis block
	// timestamp and the current block timestamp.
	var delta int64
	if parentHeight < types.OakHardforkFixBlock {
		// This is the original code. It is incorrect, because it is comparing
		// 'expectedTime', an absolute value, to 'parentTotalTime', a value
		// which gets compressed every block. The result is that 'expectedTime'
		// is always substantially larger than 'parentTotalTime', and the
		// shifter always reads that blocks have been coming out far too
		// quickly.
		expectedTime := int64(types.BlockFrequency * parentHeight)
		delta = expectedTime - parentTotalTime
	} else {
		// This is the correct code. The expected time is an absolute time based
		// on the genesis block, and the delta is an absolute time based on the
		// timestamp of the parent block.
		//
		// Rules elsewhere in consensus ensure that the timestamp of the parent
		// block has not been manipulated by more than a few hours, which is
		// accurate enough for this logic to be safe.
		expectedTime := int64(types.BlockFrequency*parentHeight) + int64(types.GenesisTimestamp)
		delta = expectedTime - int64(parentTimestamp)
	}
	// Convert the delta into a target block time.
	square := delta * delta
	if delta < 0 {
		// If the delta is negative, restore the negative value.
		square *= -1
	}
	shift := square / 10e6 // A 10e3 second delta leads to a 10 second shift.
	targetBlockTime := int64(types.BlockFrequency) + shift

	// Clamp the block time to 1/3 and 3x the target block time.
	if targetBlockTime < int64(types.BlockFrequency)/types.OakMaxBlockShift {
		targetBlockTime = int64(types.BlockFrequency) / types.OakMaxBlockShift
	}
	if targetBlockTime > int64(types.BlockFrequency)*types.OakMaxBlockShift {
		targetBlockTime = int64(types.BlockFrequency) * types.OakMaxBlockShift
	}

	// Determine the hashrate using the total time and total target. Set a
	// minimum total time of 1 to prevent divide by zero and underflows.
	if parentTotalTime < 1 {
		parentTotalTime = 1
	}
	visibleHashrate := parentTotalTarget.Difficulty().Div64(uint64(parentTotalTime)) // Hashes per second.
	// Handle divide by zero risks.
	if visibleHashrate.IsZero() {
		visibleHashrate = visibleHashrate.Add(types.NewCurrency64(1))
	}
	if targetBlockTime == 0 {
		// This code can only possibly be triggered if the block frequency is
		// less than 3, but during testing the block frequency is 1.
		targetBlockTime = 1
	}

	// Determine the new target by multiplying the visible hashrate by the
	// target block time. Clamp it to a 0.4% difficulty adjustment.
	maxNewTarget := currentTarget.MulDifficulty(types.OakMaxRise) // Max = difficulty increase (target decrease)
	minNewTarget := currentTarget.MulDifficulty(types.OakMaxDrop) // Min = difficulty decrease (target increase)
	newTarget := types.RatToTarget(new(big.Rat).SetFrac(types.RootDepth.Int(), visibleHashrate.Mul64(uint64(targetBlockTime)).Big()))
	if newTarget.Cmp(maxNewTarget) < 0 {
		newTarget = maxNewTarget
	}
	if newTarget.Cmp(minNewTarget) > 0 {
		// This can only possibly trigger if the BlockFrequency is less than 3
		// seconds, but during testing it is 1 second.
		newTarget = minNewTarget
	}
	return newTarget
}
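
// The 0.4% clamp above compounds across blocks: over a 1008-block window it
// allows the difficulty to change by roughly 55x, the figure quoted at the top
// of this file. exampleOakClampBound is a hypothetical helper included only to
// illustrate that compounding, not consensus code; it assumes the 0.4%
// per-block limit and computes the compounded bound exactly with big.Rat.
func exampleOakClampBound(blocks int) *big.Rat {
	perBlock := big.NewRat(1004, 1000) // A 0.4% change per block.
	total := big.NewRat(1, 1)
	for i := 0; i < blocks; i++ {
		total.Mul(total, perBlock)
	}
	return total // exampleOakClampBound(1008) ≈ 55.9
}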

// getBlockTotals returns the block totals that were stored by
// storeBlockTotals.
func (cs *ConsensusSet) getBlockTotals(tx *bolt.Tx, id types.BlockID) (totalTime int64, totalTarget types.Target) {
	totalsBytes := tx.Bucket(BucketOak).Get(id[:])
	totalTime = int64(binary.LittleEndian.Uint64(totalsBytes[:8]))
	copy(totalTarget[:], totalsBytes[8:])
	return
}
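
// The totals value decoded above is the 40-byte encoding produced by
// storeBlockTotals: bytes [0:8) hold the total time as a little-endian uint64,
// and bytes [8:40) hold the 32-byte total target. exampleTotalsRoundtrip is a
// hypothetical helper included only to illustrate that layout; it is not used
// by the consensus code.
func exampleTotalsRoundtrip(totalTime int64, totalTarget types.Target) (int64, types.Target) {
	// Encode, exactly as storeBlockTotals does.
	buf := make([]byte, 40)
	binary.LittleEndian.PutUint64(buf[:8], uint64(totalTime))
	copy(buf[8:], totalTarget[:])

	// Decode, exactly as getBlockTotals does.
	var outTarget types.Target
	outTime := int64(binary.LittleEndian.Uint64(buf[:8]))
	copy(outTarget[:], buf[8:])
	return outTime, outTarget
}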

// storeBlockTotals computes the new total time and total target for the current
// block and stores the new totals in the database. It also returns the new
// totals.
func (cs *ConsensusSet) storeBlockTotals(tx *bolt.Tx, currentHeight types.BlockHeight, currentBlockID types.BlockID, prevTotalTime int64, parentTimestamp, currentTimestamp types.Timestamp, prevTotalTarget, targetOfCurrentBlock types.Target) (newTotalTime int64, newTotalTarget types.Target, err error) {
	// Reset the prevTotalTime to a delta of zero just before the hardfork.
	//
	// NOTICE: This code is broken; it is an incorrectly executed hardfork. The
	// correct thing to do was to not put in these 3 lines of code. It is
	// correct to not have them.
	//
	// This code is incorrect, and introduces an unfortunate drop in difficulty,
	// because this is an uncompressed prevTotalTime, but really it should be
	// getting set to a compressed prevTotalTime. And, actually, a compressed
	// prevTotalTime doesn't have much meaning, so this code block shouldn't be
	// here at all. But... this is the code that was running for the block
	// 135,000 hardfork, so this code needs to stay. With the standard
	// constants, it should cause a disruptive bump that lasts only a few days.
	//
	// The disruption will be complete well before we can deploy a fix, so
	// there's no point in fixing it.
	if currentHeight == types.OakHardforkBlock-1 {
		prevTotalTime = int64(types.BlockFrequency * currentHeight)
	}

	// For each value, first multiply by the decay, and then add in the new
	// delta.
	newTotalTime = (prevTotalTime * types.OakDecayNum / types.OakDecayDenom) + (int64(currentTimestamp) - int64(parentTimestamp))
	newTotalTarget = prevTotalTarget.MulDifficulty(big.NewRat(types.OakDecayNum, types.OakDecayDenom)).AddDifficulties(targetOfCurrentBlock)

	// Store the new total time and total target in the database at the
	// appropriate id.
	bytes := make([]byte, 40)
	binary.LittleEndian.PutUint64(bytes[:8], uint64(newTotalTime))
	copy(bytes[8:], newTotalTarget[:])
	err = tx.Bucket(BucketOak).Put(currentBlockID[:], bytes)
	if err != nil {
		return 0, types.Target{}, errors.Extend(errors.New("unable to store total time values"), err)
	}
	return newTotalTime, newTotalTarget, nil
}
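
// exampleOakDecayTime is a minimal sketch of the running-total update above,
// assuming the 0.995 decay described at the top of this file (so
// OakDecayNum/OakDecayDenom = 995/1000). It is a hypothetical helper for
// illustration, not consensus code: with a previous total time of 100,000
// seconds and a block arriving 600 seconds after its parent, the new total is
// 100000*995/1000 + 600 = 100,100.
func exampleOakDecayTime(prevTotalTime, secondsSinceParent int64) int64 {
	return (prevTotalTime * 995 / 1000) + secondsSinceParent
}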

// initOak will initialize all of the oak difficulty adjustment related fields.
// This is separate from the initialization process for compatibility reasons -
// some databases will not have these fields at start, so their presence must
// be checked.
//
// After oak initialization is complete, a specific field in the oak bucket is
// marked so that oak initialization can be skipped in the future.
func (cs *ConsensusSet) initOak(tx *bolt.Tx) error {
	// Prep the oak bucket.
	bucketOak, err := tx.CreateBucketIfNotExists(BucketOak)
	if err != nil {
		return errors.Extend(errors.New("unable to create oak bucket"), err)
	}
	// Check whether the init field is set.
	if bytes.Equal(bucketOak.Get(FieldOakInit), ValueOakInit) {
		// The oak fields have been initialized, nothing to do.
		return nil
	}

	// If the current height is greater than the hardfork trigger height,
	// return an error and refuse to initialize.
	height := blockHeight(tx)
	if height > types.OakHardforkBlock {
		return errOakHardforkIncompatibility
	}

	// Store base values for the genesis block.
	totalTime, totalTarget, err := cs.storeBlockTotals(tx, 0, types.GenesisID, 0, types.GenesisTimestamp, types.GenesisTimestamp, types.RootDepth, types.RootTarget)
	if err != nil {
		return errors.Extend(errors.New("unable to store genesis block totals"), err)
	}

	// The Oak fields have not been initialized, so scan through the consensus
	// set and set the fields for each block.
	parentTimestamp := types.GenesisTimestamp
	parentChildTarget := types.RootTarget
	for i := types.BlockHeight(1); i <= height; i++ { // Skip the genesis block.
		// Fetch the processed block for the current height.
		id, err := getPath(tx, i)
		if err != nil {
			return errors.Extend(errors.New("unable to find block at height"), err)
		}
		pb, err := getBlockMap(tx, id)
		if err != nil {
			return errors.Extend(errors.New("unable to find block from id"), err)
		}

		// Calculate and store the new block totals.
		totalTime, totalTarget, err = cs.storeBlockTotals(tx, i, id, totalTime, parentTimestamp, pb.Block.Timestamp, totalTarget, parentChildTarget)
		if err != nil {
			return errors.Extend(errors.New("unable to store updated block totals"), err)
		}
		// Update the previous values.
		parentTimestamp = pb.Block.Timestamp
		parentChildTarget = pb.ChildTarget
	}

	// Tag the initialization field in the oak bucket, indicating that
	// initialization has completed.
	err = bucketOak.Put(FieldOakInit, ValueOakInit)
	if err != nil {
		return errors.Extend(errors.New("unable to put oak init confirmation into oak bucket"), err)
	}
	return nil
}