github.com/filecoin-project/specs-actors/v4@v4.0.2/actors/builtin/miner/deadline_assignment.go

package miner

import (
	"container/heap"

	"golang.org/x/xerrors"
)

// Helper types for deadline assignment.
type deadlineAssignmentInfo struct {
	index        int
	liveSectors  uint64
	totalSectors uint64
}

// partitionsAfterAssignment returns the number of partitions this deadline
// would hold, before compaction, if one more sector were assigned to it.
func (dai *deadlineAssignmentInfo) partitionsAfterAssignment(partitionSize uint64) uint64 {
	sectorCount := dai.totalSectors + 1 // after assignment
	fullPartitions := sectorCount / partitionSize
	if (sectorCount % partitionSize) == 0 {
		return fullPartitions
	}
	return fullPartitions + 1 // +1 for partial partition.
}

// compactPartitionsAfterAssignment returns the number of partitions this
// deadline would hold, after compaction (counting only live sectors), if one
// more sector were assigned to it.
func (dai *deadlineAssignmentInfo) compactPartitionsAfterAssignment(partitionSize uint64) uint64 {
	sectorCount := dai.liveSectors + 1 // after assignment
	fullPartitions := sectorCount / partitionSize
	if (sectorCount % partitionSize) == 0 {
		return fullPartitions
	}
	return fullPartitions + 1 // +1 for partial partition.
}

// isFullNow reports whether every partition in this deadline is exactly full.
func (dai *deadlineAssignmentInfo) isFullNow(partitionSize uint64) bool {
	return (dai.totalSectors % partitionSize) == 0
}

// maxPartitionsReached reports whether this deadline cannot accept another
// sector without exceeding the maximum partition count.
func (dai *deadlineAssignmentInfo) maxPartitionsReached(partitionSize, maxPartitions uint64) bool {
	return dai.totalSectors >= partitionSize*maxPartitions
}
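
// The two ...AfterAssignment helpers above implement a ceiling division:
// ceil((sectors + 1) / partitionSize). A minimal worked sketch follows; this
// example function is not part of the original file.
func examplePartitionCounts() {
	// Hypothetical deadline: 7 sectors total, 6 still live (1 terminated).
	dai := &deadlineAssignmentInfo{index: 0, liveSectors: 6, totalSectors: 7}

	// Before compaction: 7+1 = 8 sectors at partitionSize 4 fill exactly
	// two partitions.
	_ = dai.partitionsAfterAssignment(4) // 2

	// After compaction: 6+1 = 7 live sectors at partitionSize 4 need one
	// full partition plus a partial one.
	_ = dai.compactPartitionsAfterAssignment(4) // 2: ceil(7/4)

	// 7 total sectors at partitionSize 4 leave an open partition with one
	// free slot, so the deadline is not "full now".
	_ = dai.isFullNow(4) // false: 7 % 4 != 0
}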

type deadlineAssignmentHeap struct {
	maxPartitions uint64
	partitionSize uint64
	deadlines     []*deadlineAssignmentInfo
}

func (dah *deadlineAssignmentHeap) Len() int {
	return len(dah.deadlines)
}

func (dah *deadlineAssignmentHeap) Swap(i, j int) {
	dah.deadlines[i], dah.deadlines[j] = dah.deadlines[j], dah.deadlines[i]
}

func (dah *deadlineAssignmentHeap) Less(i, j int) bool {
	a, b := dah.deadlines[i], dah.deadlines[j]

	// If one of the deadlines has already reached its limit for the maximum
	// number of partitions and the other hasn't, directly pick the deadline
	// that hasn't reached its limit.
	aMaxPartitionsReached := a.maxPartitionsReached(dah.partitionSize, dah.maxPartitions)
	bMaxPartitionsReached := b.maxPartitionsReached(dah.partitionSize, dah.maxPartitions)
	if aMaxPartitionsReached != bMaxPartitionsReached {
		return !aMaxPartitionsReached
	}

	// Otherwise, when assigning sectors to deadlines, we're trying to
	// optimize the following:
	//
	// First, avoid increasing the maximum number of partitions in any
	// deadline, across all deadlines, after compaction. This would
	// necessitate buying a new GPU.
	//
	// Second, avoid forcing the miner to repeatedly compact partitions. A
	// miner would be "forced" to compact a partition when the number of
	// partitions in any given deadline goes above the current maximum
	// number of partitions across all deadlines, and compacting that
	// deadline would then reduce the number of partitions, reducing the
	// maximum.
	//
	// At the moment, the only "forced" compaction happens when either:
	//
	// 1. Assignment of the sector into any deadline would force a
	//    compaction.
	// 2. The chosen deadline has at least one full partition's worth of
	//    terminated sectors and at least one fewer partition (after
	//    compaction) than any other deadline.
	//
	// Third, we attempt to assign "runs" of sectors to the same partition
	// to reduce the size of the bitfields.
	//
	// Finally, we try to balance the number of sectors (and thus
	// partitions) assigned to any given deadline over time.

	// Summary:
	//
	// 1. Assign to the deadline that will have the _least_ number of
	//    post-compaction partitions (after sector assignment).
	// 2. Assign to the deadline that will have the _least_ number of
	//    pre-compaction partitions (after sector assignment).
	// 3. Assign to a deadline with a non-full partition.
	//    - If both have non-full partitions, assign to the most full one
	//      (stable assignment).
	// 4. Assign to the deadline with the least number of live sectors.
	// 5. Assign sectors to the deadline with the lowest index first.

	// If one deadline would end up with fewer partitions (after
	// compacting), assign to that one. This ensures we keep the maximum
	// number of partitions in any given deadline to a minimum.
	//
	// Technically, this could increase the maximum number of partitions
	// before compaction. However, that can only happen if the deadline in
	// question could save an entire partition by compacting. At that point,
	// the miner should compact the deadline.
	aCompactPartitionsAfterAssignment := a.compactPartitionsAfterAssignment(dah.partitionSize)
	bCompactPartitionsAfterAssignment := b.compactPartitionsAfterAssignment(dah.partitionSize)
	if aCompactPartitionsAfterAssignment != bCompactPartitionsAfterAssignment {
		return aCompactPartitionsAfterAssignment < bCompactPartitionsAfterAssignment
	}

	// If, after assignment, neither deadline would have fewer
	// post-compaction partitions, assign to the deadline with the fewest
	// pre-compaction partitions (after assignment). This will put off
	// compaction as long as possible.
	aPartitionsAfterAssignment := a.partitionsAfterAssignment(dah.partitionSize)
	bPartitionsAfterAssignment := b.partitionsAfterAssignment(dah.partitionSize)
	if aPartitionsAfterAssignment != bPartitionsAfterAssignment {
		return aPartitionsAfterAssignment < bPartitionsAfterAssignment
	}

	// OK, we'll end up with the same number of partitions any which way we
	// go. Try to fill up a partition instead of opening a new one.
	aIsFullNow := a.isFullNow(dah.partitionSize)
	bIsFullNow := b.isFullNow(dah.partitionSize)
	if aIsFullNow != bIsFullNow {
		return !aIsFullNow
	}

	// Either both deadlines have an open partition, or neither does.

	// If both have open partitions, fill the deadline with the most-full
	// open partition. This helps us assign runs of sequential sectors into
	// the same partition.
	if !aIsFullNow && !bIsFullNow {
		if a.totalSectors != b.totalSectors {
			return a.totalSectors > b.totalSectors
		}
	}

	// Otherwise, assign to the deadline with the fewest live sectors. This
	// breaks the tie left by the two immediately preceding conditions.
	if a.liveSectors != b.liveSectors {
		return a.liveSectors < b.liveSectors
	}

	// Finally, fall back on the deadline index.
	// TODO: Randomize by index instead of simply sorting.
	// https://github.com/filecoin-project/specs-actors/issues/432
	return a.index < b.index
}
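
// A hypothetical sketch (not in the original file) exercising the ordering
// above. With partitionSize = 4, a deadline holding 5 sectors (one open,
// partially-filled partition) sorts ahead of a deadline holding 4 sectors
// (all partitions exactly full): both would end up with two partitions after
// assignment, so the tie is broken by preferring to top up the open
// partition rather than start a new one.
func exampleLessOrdering() bool {
	dah := &deadlineAssignmentHeap{
		maxPartitions: 8,
		partitionSize: 4,
		deadlines: []*deadlineAssignmentInfo{
			{index: 0, liveSectors: 5, totalSectors: 5}, // open partition with one free slot
			{index: 1, liveSectors: 4, totalSectors: 4}, // exactly one full partition
		},
	}
	return dah.Less(0, 1) // true: rule 3 (non-full partition) breaks the tie
}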

func (dah *deadlineAssignmentHeap) Push(x interface{}) {
	dah.deadlines = append(dah.deadlines, x.(*deadlineAssignmentInfo))
}

func (dah *deadlineAssignmentHeap) Pop() interface{} {
	last := dah.deadlines[len(dah.deadlines)-1]
	dah.deadlines[len(dah.deadlines)-1] = nil
	dah.deadlines = dah.deadlines[:len(dah.deadlines)-1]
	return last
}

// assignDeadlines assigns sectors to deadlines, first filling partial
// partitions, then adding new partitions to deadlines with the fewest live
// sectors.
func assignDeadlines(
	maxPartitions uint64,
	partitionSize uint64,
	deadlines *[WPoStPeriodDeadlines]*Deadline,
	sectors []*SectorOnChainInfo,
) (changes [WPoStPeriodDeadlines][]*SectorOnChainInfo, err error) {
	// Build a heap of candidate deadlines, ordered by the assignment
	// preference implemented in Less above.
	dlHeap := deadlineAssignmentHeap{
		maxPartitions: maxPartitions,
		partitionSize: partitionSize,
		deadlines:     make([]*deadlineAssignmentInfo, 0, len(deadlines)),
	}

	for dlIdx, dl := range deadlines {
		if dl != nil {
			dlHeap.deadlines = append(dlHeap.deadlines, &deadlineAssignmentInfo{
				index:        dlIdx,
				liveSectors:  dl.LiveSectors,
				totalSectors: dl.TotalSectors,
			})
		}
	}

	heap.Init(&dlHeap)

	// Assign sectors to deadlines, always picking the best candidate (the
	// heap root) and re-sifting it after each assignment.
	for _, sector := range sectors {
		info := dlHeap.deadlines[0]

		if info.maxPartitionsReached(partitionSize, maxPartitions) {
			return changes, xerrors.Errorf("maxPartitions limit %d reached for all deadlines", maxPartitions)
		}

		changes[info.index] = append(changes[info.index], sector)
		info.liveSectors++
		info.totalSectors++

		// Restore the heap invariant for the modified root.
		heap.Fix(&dlHeap, 0)
	}

	return changes, nil
}
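
// A hypothetical usage sketch (not part of the original file): distribute a
// batch of new sectors across the proving period's deadlines. The
// maxPartitions and partitionSize values here are illustrative placeholders;
// in the actor they come from policy parameters.
func exampleAssignDeadlines(
	deadlines *[WPoStPeriodDeadlines]*Deadline,
	sectors []*SectorOnChainInfo,
) ([WPoStPeriodDeadlines][]*SectorOnChainInfo, error) {
	const maxPartitions = 10 // hypothetical cap on partitions per deadline
	const partitionSize = 8  // hypothetical sectors per partition

	// changes[i] lists the sectors newly assigned to deadline i; the
	// deadlines themselves are not mutated, so the caller applies the
	// returned changes to miner state.
	return assignDeadlines(maxPartitions, partitionSize, deadlines, sectors)
}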