github.com/johnathanhowell/sia@v0.5.1-beta.0.20160524050156-83dcc3d37c94/modules/consensus/processedblock.go

package consensus

import (
	"math/big"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/encoding"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"

	"github.com/NebulousLabs/bolt"
)

// SurpassThreshold is a percentage that dictates how much heavier a competing
// chain has to be before the node will switch to mining on that chain. This is
// not a consensus rule. This percentage is only applied to the most recent
// block, not the entire chain; see processedBlock.heavierThan.
//
// If no threshold were in place, it would be possible to manipulate a block's
// timestamp to produce a sufficiently heavier block.
var SurpassThreshold = big.NewRat(20, 100)

// processedBlock is a copy/rename of blockNode, with the pointers to other
// blockNodes replaced with block IDs, and all of the fields exported, so that
// a block node can be marshalled.
type processedBlock struct {
	Block       types.Block
	Height      types.BlockHeight
	Depth       types.Target
	ChildTarget types.Target

	DiffsGenerated            bool
	SiacoinOutputDiffs        []modules.SiacoinOutputDiff
	FileContractDiffs         []modules.FileContractDiff
	SiafundOutputDiffs        []modules.SiafundOutputDiff
	DelayedSiacoinOutputDiffs []modules.DelayedSiacoinOutputDiff
	SiafundPoolDiffs          []modules.SiafundPoolDiff

	ConsensusChecksum crypto.Hash
}

// heavierThan returns true if the processedBlock 'pb' is sufficiently heavier
// than 'cmp'. 'cmp' is expected to be the current block node. "Sufficient"
// means that the weight of 'pb' exceeds the weight of 'cmp' by:
//
//	(the difficulty of 'cmp.ChildTarget' * 'SurpassThreshold')
func (pb *processedBlock) heavierThan(cmp *processedBlock) bool {
	requirement := cmp.Depth.AddDifficulties(cmp.ChildTarget.MulDifficulty(SurpassThreshold))
	return requirement.Cmp(pb.Depth) > 0 // Inverted, because the smaller target is actually heavier.
}
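
// exampleReorgRequirement is a minimal sketch, not part of the original file,
// that makes the heavierThan rule concrete: with SurpassThreshold at 20/100, a
// competing block must carry the current chain's depth plus an extra 20% of
// the current child target's difficulty before the node will reorganize onto
// it. The function name is an illustrative assumption, not an API of this
// package.
func exampleReorgRequirement(current *processedBlock) types.Target {
	// requirement = depth(current) + 0.20 * difficulty(current.ChildTarget).
	// A competitor 'pb' wins when pb.heavierThan(current) is true, i.e. when
	// requirement.Cmp(pb.Depth) > 0 (smaller targets are heavier).
	return current.Depth.AddDifficulties(current.ChildTarget.MulDifficulty(SurpassThreshold))
}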
// childDepth returns the depth of a blockNode's child nodes. The depth is the
// "sum" of the current depth and the current difficulty; see
// types.Target.AddDifficulties for more detailed information.
func (pb *processedBlock) childDepth() types.Target {
	return pb.Depth.AddDifficulties(pb.ChildTarget)
}

// targetAdjustmentBase returns the magnitude that the target should be
// adjusted by before a clamp is applied.
func (cs *ConsensusSet) targetAdjustmentBase(blockMap *bolt.Bucket, pb *processedBlock) *big.Rat {
	// Grab the block that was generated 'TargetWindow' blocks prior to the
	// parent. If there are not 'TargetWindow' blocks yet, stop at the genesis
	// block.
	var windowSize types.BlockHeight
	parent := pb.Block.ParentID
	current := pb.Block.ID()
	for windowSize = 0; windowSize < types.TargetWindow && parent != (types.BlockID{}); windowSize++ {
		current = parent
		// The marshalled processedBlock begins with the marshalled
		// types.Block, so the parent ID occupies the first 32 bytes.
		copy(parent[:], blockMap.Get(parent[:])[:32])
	}
	// The timestamp sits at offset 40 of the marshalled block, after the
	// 32-byte parent ID and the 8-byte nonce.
	timestamp := types.Timestamp(encoding.DecUint64(blockMap.Get(current[:])[40:48]))

	// The target of a child is determined by the amount of time that has
	// passed between the generation of its immediate parent and its
	// TargetWindow'th parent. The expected number of seconds to have passed is
	// TargetWindow*BlockFrequency. The target is adjusted in proportion to how
	// much time has passed vs. the expected amount of time to have passed.
	//
	// The target is converted to a big.Rat to provide infinite precision
	// during the calculation. The big.Rat is just the int representation of a
	// target.
	timePassed := pb.Block.Timestamp - timestamp
	expectedTimePassed := types.BlockFrequency * windowSize
	return big.NewRat(int64(timePassed), int64(expectedTimePassed))
}

// clampTargetAdjustment returns a clamped version of the base adjustment
// value. The clamp keeps the maximum adjustment to ~7x every 2000 blocks. This
// ensures that raising and lowering the difficulty requires a minimum amount
// of total work, which prevents certain classes of difficulty-adjustment
// attacks.
func clampTargetAdjustment(base *big.Rat) *big.Rat {
	if base.Cmp(types.MaxAdjustmentUp) > 0 {
		return types.MaxAdjustmentUp
	} else if base.Cmp(types.MaxAdjustmentDown) < 0 {
		return types.MaxAdjustmentDown
	}
	return base
}

// setChildTarget computes the target of a blockNode's child. All children of a
// node have the same target.
func (cs *ConsensusSet) setChildTarget(blockMap *bolt.Bucket, pb *processedBlock) {
	// Fetch the parent block.
	var parent processedBlock
	parentBytes := blockMap.Get(pb.Block.ParentID[:])
	err := encoding.Unmarshal(parentBytes, &parent)
	if build.DEBUG && err != nil {
		panic(err)
	}

	// The target is only adjusted once every TargetWindow/2 blocks; all other
	// blocks inherit the parent's child target.
	if pb.Height%(types.TargetWindow/2) != 0 {
		pb.ChildTarget = parent.ChildTarget
		return
	}
	adjustment := clampTargetAdjustment(cs.targetAdjustmentBase(blockMap, pb))
	adjustedRatTarget := new(big.Rat).Mul(parent.ChildTarget.Rat(), adjustment)
	pb.ChildTarget = types.RatToTarget(adjustedRatTarget)
}

// newChild creates a blockNode from a block and adds it to the parent's set of
// children. The new node is also returned. It necessarily modifies the
// database.
func (cs *ConsensusSet) newChild(tx *bolt.Tx, pb *processedBlock, b types.Block) *processedBlock {
	// Create the child node.
	childID := b.ID()
	child := &processedBlock{
		Block:  b,
		Height: pb.Height + 1,
		Depth:  pb.childDepth(),
	}
	blockMap := tx.Bucket(BlockMap)
	cs.setChildTarget(blockMap, child)
	err := blockMap.Put(childID[:], encoding.Marshal(*child))
	if build.DEBUG && err != nil {
		panic(err)
	}
	return child
}
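
// exampleChildTargetCalculation is a minimal sketch, not part of the original
// file, of the arithmetic performed by setChildTarget on an adjustment block
// (every TargetWindow/2 blocks): the measured time over the window is divided
// by the expected time, clamped by clampTargetAdjustment, and multiplied into
// the parent's target. The function name and parameters are illustrative
// assumptions.
func exampleChildTargetCalculation(parentTarget types.Target, timePassed, expectedTimePassed int64) types.Target {
	// If blocks arrived slower than expected, timePassed > expectedTimePassed,
	// the ratio exceeds 1, and the target grows (difficulty drops); faster
	// blocks shrink the target (difficulty rises).
	base := big.NewRat(timePassed, expectedTimePassed)
	adjusted := new(big.Rat).Mul(parentTarget.Rat(), clampTargetAdjustment(base))
	return types.RatToTarget(adjusted)
}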