// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

import (
	"bytes"
	crand "crypto/rand"
	"encoding/json"
	"errors"
	"math"
	"math/big"
	"math/rand"
	"net/http"
	"runtime"
	"sync"
	"time"

	"github.com/luckypickle/go-ethereum-vet/common"
	"github.com/luckypickle/go-ethereum-vet/consensus"
	"github.com/luckypickle/go-ethereum-vet/core/types"
	"github.com/luckypickle/go-ethereum-vet/log"
)

// Sentinel errors returned to remote miners through the work/submit channels.
var (
	// errNoMiningWork is returned when a remote miner fetches work before any
	// block has been pushed into the sealer (see remote's fetchWorkCh case).
	errNoMiningWork = errors.New("no mining work available yet")
	// errInvalidSealResult is returned when a remotely submitted solution fails
	// verification or refers to stale/unknown work (see remote's submitWorkCh case).
	errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
)

// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
	// If we're running a fake PoW, simply return a 0 nonce immediately
	if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
		header := block.Header()
		header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
		return block.WithSeal(header), nil
	}
	// If we're running a shared PoW, delegate sealing to it
	if ethash.shared != nil {
		return ethash.shared.Seal(chain, block, stop)
	}
	// Create a runner and the multiple search threads it directs
	abort := make(chan struct{})

	ethash.lock.Lock()
	threads := ethash.threads
	if ethash.rand == nil {
		// Lazily seed the nonce PRNG from crypto/rand so separate runs don't
		// search the same nonce space.
		seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
		if err != nil {
			ethash.lock.Unlock()
			return nil, err
		}
		ethash.rand = rand.New(rand.NewSource(seed.Int64()))
	}
	ethash.lock.Unlock()
	if threads == 0 {
		threads = runtime.NumCPU()
	}
	if threads < 0 {
		threads = 0 // Allows disabling local mining without extra logic around local/remote
	}
	// Push new work to remote sealer
	if ethash.workCh != nil {
		ethash.workCh <- block
	}
	var pend sync.WaitGroup
	for i := 0; i < threads; i++ {
		pend.Add(1)
		go func(id int, nonce uint64) {
			defer pend.Done()
			ethash.mine(block, id, nonce, abort, ethash.resultCh)
		}(i, uint64(ethash.rand.Int63()))
		// NOTE(review): ethash.rand.Int63() is read here outside ethash.lock;
		// math/rand.Rand is not safe for concurrent use, so this is racy if
		// Seal can run concurrently — confirm the caller serializes Seal.
	}
	// Wait until sealing is terminated or a nonce is found
	var result *types.Block
	select {
	case <-stop:
		// Outside abort, stop all miner threads
		close(abort)
	case result = <-ethash.resultCh:
		// One of the threads found a block, abort all others
		close(abort)
	case <-ethash.update:
		// Thread count was changed on user request, restart
		close(abort)
		pend.Wait()
		return ethash.Seal(chain, block, stop)
	}
	// Wait for all miners to terminate and return the block
	pend.Wait()
	return result, nil
}

// mine is the actual proof-of-work miner that searches for a nonce starting from
// seed that results in correct final block difficulty.
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
	// Extract some data from the header
	var (
		header  = block.Header()
		hash    = header.HashNoNonce().Bytes()
		target  = new(big.Int).Div(two256, header.Difficulty) // solution valid iff PoW value <= 2^256/difficulty
		number  = header.Number.Uint64()
		dataset = ethash.dataset(number, false)
	)
	// Start generating random nonces until we abort or find a good one
	var (
		attempts = int64(0)
		nonce    = seed
	)
	logger := log.New("miner", id)
	logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
	for {
		select {
		case <-abort:
			// Mining terminated, update stats and abort
			logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
			ethash.hashrate.Mark(attempts)
			break search

		default:
			// We don't have to update hash rate on every nonce, so update after 2^X nonces
			attempts++
			if (attempts % (1 << 15)) == 0 {
				ethash.hashrate.Mark(attempts)
				attempts = 0
			}
			// Compute the PoW value of this nonce
			digest, result := hashimotoFull(dataset.dataset, hash, nonce)
			if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
				// Correct nonce found, create a new header with it.
				// Copy the header so concurrent miners don't mutate a shared one.
				header = types.CopyHeader(header)
				header.Nonce = types.EncodeNonce(nonce)
				header.MixDigest = common.BytesToHash(digest)

				// Seal and return a block (if still needed)
				select {
				case found <- block.WithSeal(header):
					logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
				case <-abort:
					logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
				}
				break search
			}
			nonce++
		}
	}
	// Datasets are unmapped in a finalizer. Ensure that the dataset stays live
	// during sealing so it's not unmapped while being read.
	runtime.KeepAlive(dataset)
}

// remote is a standalone goroutine to handle remote mining related stuff.
func (ethash *Ethash) remote(notify []string) {
	var (
		works = make(map[common.Hash]*types.Block) // pending blocks, keyed by their PoW hash
		rates = make(map[common.Hash]hashrate)     // per-remote-miner reported hash rates

		currentBlock *types.Block
		currentWork  [3]string

		notifyTransport = &http.Transport{}
		notifyClient    = &http.Client{
			Transport: notifyTransport,
			Timeout:   time.Second,
		}
		notifyReqs = make([]*http.Request, len(notify))
	)
	// notifyWork notifies all the specified mining endpoints of the availability of
	// new work to be processed.
	notifyWork := func() {
		work := currentWork
		blob, _ := json.Marshal(work)

		for i, url := range notify {
			// Terminate any previously pending request and create the new work.
			// NOTE(review): Transport.CancelRequest is deprecated in modern Go
			// in favor of Request.WithContext cancellation — consider migrating.
			if notifyReqs[i] != nil {
				notifyTransport.CancelRequest(notifyReqs[i])
			}
			notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
			notifyReqs[i].Header.Set("Content-Type", "application/json")

			// Push the new work concurrently to all the remote nodes
			go func(req *http.Request, url string) {
				res, err := notifyClient.Do(req)
				if err != nil {
					log.Warn("Failed to notify remote miner", "err", err)
				} else {
					log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
					res.Body.Close()
				}
			}(notifyReqs[i], url)
		}
	}
	// makeWork creates a work package for external miner.
	//
	// The work package consists of 3 strings:
	//   result[0], 32 bytes hex encoded current block header pow-hash
	//   result[1], 32 bytes hex encoded seed hash used for DAG
	//   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
	makeWork := func(block *types.Block) {
		hash := block.HashNoNonce()

		currentWork[0] = hash.Hex()
		currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
		currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()

		// Trace the seal work fetched by remote sealer.
		currentBlock = block
		works[hash] = block
	}
	// submitWork verifies the submitted pow solution, returning
	// whether the solution was accepted or not (a rejection can be both a bad pow
	// as well as any other error, like no pending work or stale mining result).
	submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool {
		// Make sure the work submitted is present
		block := works[hash]
		if block == nil {
			log.Info("Work submitted but none pending", "hash", hash)
			return false
		}
		// Verify the correctness of submitted result.
		header := block.Header()
		header.Nonce = nonce
		header.MixDigest = mixDigest

		start := time.Now()
		if err := ethash.verifySeal(nil, header, true); err != nil {
			log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err)
			return false
		}
		// Make sure the result channel is created.
		if ethash.resultCh == nil {
			log.Warn("Ethash result channel is empty, submitted mining result is rejected")
			return false
		}
		log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start))

		// Solution seems to be valid, return to the miner and notify acceptance.
		// Non-blocking send: if nobody is waiting on resultCh the work is stale.
		select {
		case ethash.resultCh <- block.WithSeal(header):
			delete(works, hash)
			return true
		default:
			log.Info("Work submitted is stale", "hash", hash)
			return false
		}
	}

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case block := <-ethash.workCh:
			if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
				// Start new round mining, throw out all previous work.
				works = make(map[common.Hash]*types.Block)
			}
			// Update current work with new received block.
			// Note the same work can be passed in twice, happens when changing CPU threads.
			makeWork(block)

			// Notify any requested URLs of the new work availability
			notifyWork()

		case work := <-ethash.fetchWorkCh:
			// Return current mining work to remote miner.
			if currentBlock == nil {
				work.errc <- errNoMiningWork
			} else {
				work.res <- currentWork
			}

		case result := <-ethash.submitWorkCh:
			// Verify submitted PoW solution based on maintained mining blocks.
			if submitWork(result.nonce, result.mixDigest, result.hash) {
				result.errc <- nil
			} else {
				result.errc <- errInvalidSealResult
			}

		case result := <-ethash.submitRateCh:
			// Trace remote sealer's hash rate by submitted value.
			rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
			close(result.done)

		case req := <-ethash.fetchRateCh:
			// Gather all hash rate submitted by remote sealer.
			var total uint64
			for _, rate := range rates {
				// this could overflow
				total += rate.rate
			}
			req <- total

		case <-ticker.C:
			// Clear stale submitted hash rate.
			for id, rate := range rates {
				if time.Since(rate.ping) > 10*time.Second {
					delete(rates, id)
				}
			}

		case errc := <-ethash.exitCh:
			// Exit remote loop if ethash is closed and return relevant error.
			errc <- nil
			log.Trace("Ethash remote sealer is exiting")
			return
		}
	}
}