github.com/marconiprotocol/go-methereum-lite@v0.0.0-20190918214227-3cd8b06fcf99/consensus/ethash/ethash.go (about) 1 // Copyright 2017 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package ethash implements the ethash proof-of-work consensus engine. 18 package ethash 19 20 import ( 21 "errors" 22 "fmt" 23 "math" 24 "math/big" 25 "math/rand" 26 "os" 27 "path/filepath" 28 "reflect" 29 "runtime" 30 "strconv" 31 "sync" 32 "sync/atomic" 33 "time" 34 "unsafe" 35 36 mmap "github.com/edsrzf/mmap-go" 37 "github.com/MarconiProtocol/go-methereum-lite/common" 38 "github.com/MarconiProtocol/go-methereum-lite/consensus" 39 "github.com/MarconiProtocol/go-methereum-lite/core/types" 40 "github.com/MarconiProtocol/go-methereum-lite/log" 41 "github.com/MarconiProtocol/go-methereum-lite/metrics" 42 "github.com/MarconiProtocol/go-methereum-lite/rpc" 43 "github.com/hashicorp/golang-lru/simplelru" 44 ) 45 46 var ErrInvalidDumpMagic = errors.New("invalid dump magic") 47 48 var ( 49 // two256 is a big integer representing 2^256 50 two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) 51 52 // sharedEthash is a full instance that can be shared between multiple users. 
	// NOTE(review): positional Config literal — CacheDir="", CachesInMem=3,
	// CachesOnDisk=0, DatasetDir="", DatasetsInMem=1, DatasetsOnDisk=0, ModeNormal.
	// Building this at package init also starts a remote-sealer goroutine (New
	// spawns ethash.remote), even if the shared instance is never used.
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	// Reinterpret the first byte in memory: 0x04 means the least significant
	// byte is stored first, i.e. little endian.
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
//
// It opens the file at path, maps it, validates the leading dumpMagic words
// and returns the open file, the raw mapping and the payload with the magic
// header sliced off. On any failure the file (and mapping, if created) is
// released before returning.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	// Sanity check the magic header so a truncated or foreign file is rejected.
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}

// memoryMapFile tries to memory map an already opened file descriptor.
//
// The returned []uint32 aliases the mapped bytes: the mmap'ed byte slice
// header is copied and its Len/Cap divided by 4 (sizeof uint32) before being
// reinterpreted, so no data is copied. The caller must keep the mmap.MMap
// alive for as long as the slice is used and Unmap it afterwards.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
112 func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { 113 // Ensure the data folder exists 114 if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 115 return nil, nil, nil, err 116 } 117 // Create a huge temporary empty file to fill with data 118 temp := path + "." + strconv.Itoa(rand.Int()) 119 120 dump, err := os.Create(temp) 121 if err != nil { 122 return nil, nil, nil, err 123 } 124 if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { 125 return nil, nil, nil, err 126 } 127 // Memory map the file for writing and fill it with the generator 128 mem, buffer, err := memoryMapFile(dump, true) 129 if err != nil { 130 dump.Close() 131 return nil, nil, nil, err 132 } 133 copy(buffer, dumpMagic) 134 135 data := buffer[len(dumpMagic):] 136 generator(data) 137 138 if err := mem.Unmap(); err != nil { 139 return nil, nil, nil, err 140 } 141 if err := dump.Close(); err != nil { 142 return nil, nil, nil, err 143 } 144 if err := os.Rename(temp, path); err != nil { 145 return nil, nil, nil, err 146 } 147 return memoryMap(path) 148 } 149 150 // lru tracks caches or datasets by their last use time, keeping at most N of them. 151 type lru struct { 152 what string 153 new func(epoch uint64) interface{} 154 mu sync.Mutex 155 // Items are kept in a LRU cache, but there is a special case: 156 // We always keep an item for (highest seen epoch) + 1 as the 'future item'. 157 cache *simplelru.LRU 158 future uint64 159 futureItem interface{} 160 } 161 162 // newlru create a new least-recently-used cache for either the verification caches 163 // or the mining datasets. 
// newlru creates a new least-recently-used cache for either the verification caches
// or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	// Guard against a non-positive capacity; at least one item must fit.
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted ethash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}

// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		// Promote the previously prepared 'future item' if it matches, instead
		// of constructing a fresh one.
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	// Note the construction here is only the lightweight wrapper (newCache/
	// newDataset); the expensive generation happens lazily at the caller.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}

// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
//
// With an empty dir the cache is built purely in memory. Otherwise it is
// loaded from (or generated into) a memory mapped file under dir, falling
// back to in-memory generation if the mapping fails, and up to `limit` older
// epoch files are pruned afterwards. Safe for concurrent callers: sync.Once
// guarantees a single generation.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		// Tiny cache for tests to keep them fast.
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			// size is in bytes, the slice holds uint32s (4 bytes each).
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			// Disk path failed; degrade gracefully to an in-memory cache.
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}
closes the file. 270 func (c *cache) finalizer() { 271 if c.mmap != nil { 272 c.mmap.Unmap() 273 c.dump.Close() 274 c.mmap, c.dump = nil, nil 275 } 276 } 277 278 // dataset wraps an ethash dataset with some metadata to allow easier concurrent use. 279 type dataset struct { 280 epoch uint64 // Epoch for which this cache is relevant 281 dump *os.File // File descriptor of the memory mapped cache 282 mmap mmap.MMap // Memory map itself to unmap before releasing 283 dataset []uint32 // The actual cache data content 284 once sync.Once // Ensures the cache is generated only once 285 done uint32 // Atomic flag to determine generation status 286 } 287 288 // newDataset creates a new ethash mining dataset and returns it as a plain Go 289 // interface to be usable in an LRU cache. 290 func newDataset(epoch uint64) interface{} { 291 return &dataset{epoch: epoch} 292 } 293 294 // generate ensures that the dataset content is generated before use. 295 func (d *dataset) generate(dir string, limit int, test bool) { 296 d.once.Do(func() { 297 // Mark the dataset generated after we're done. 
This is needed for remote 298 defer atomic.StoreUint32(&d.done, 1) 299 300 csize := cacheSize(d.epoch*epochLength + 1) 301 dsize := datasetSize(d.epoch*epochLength + 1) 302 seed := seedHash(d.epoch*epochLength + 1) 303 if test { 304 csize = 1024 305 dsize = 32 * 1024 306 } 307 // If we don't store anything on disk, generate and return 308 if dir == "" { 309 cache := make([]uint32, csize/4) 310 generateCache(cache, d.epoch, seed) 311 312 d.dataset = make([]uint32, dsize/4) 313 generateDataset(d.dataset, d.epoch, cache) 314 315 return 316 } 317 // Disk storage is needed, this will get fancy 318 var endian string 319 if !isLittleEndian() { 320 endian = ".be" 321 } 322 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 323 logger := log.New("epoch", d.epoch) 324 325 // We're about to mmap the file, ensure that the mapping is cleaned up when the 326 // cache becomes unused. 327 runtime.SetFinalizer(d, (*dataset).finalizer) 328 329 // Try to load the file from disk and memory map it 330 var err error 331 d.dump, d.mmap, d.dataset, err = memoryMap(path) 332 if err == nil { 333 logger.Debug("Loaded old ethash dataset from disk") 334 return 335 } 336 logger.Debug("Failed to load old ethash dataset", "err", err) 337 338 // No previous dataset available, create a new dataset file to fill 339 cache := make([]uint32, csize/4) 340 generateCache(cache, d.epoch, seed) 341 342 d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) 343 if err != nil { 344 logger.Error("Failed to generate mapped ethash dataset", "err", err) 345 346 d.dataset = make([]uint32, dsize/2) 347 generateDataset(d.dataset, d.epoch, cache) 348 } 349 // Iterate over all previous instances and delete old ones 350 for ep := int(d.epoch) - limit; ep >= 0; ep-- { 351 seed := seedHash(uint64(ep)*epochLength + 1) 352 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, 
seed[:8], endian)) 353 os.Remove(path) 354 } 355 }) 356 } 357 358 // generated returns whether this particular dataset finished generating already 359 // or not (it may not have been started at all). This is useful for remote miners 360 // to default to verification caches instead of blocking on DAG generations. 361 func (d *dataset) generated() bool { 362 return atomic.LoadUint32(&d.done) == 1 363 } 364 365 // finalizer closes any file handlers and memory maps open. 366 func (d *dataset) finalizer() { 367 if d.mmap != nil { 368 d.mmap.Unmap() 369 d.dump.Close() 370 d.mmap, d.dump = nil, nil 371 } 372 } 373 374 // MakeCache generates a new ethash cache and optionally stores it to disk. 375 func MakeCache(block uint64, dir string) { 376 c := cache{epoch: block / epochLength} 377 c.generate(dir, math.MaxInt32, false) 378 } 379 380 // MakeDataset generates a new ethash dataset and optionally stores it to disk. 381 func MakeDataset(block uint64, dir string) { 382 d := dataset{epoch: block / epochLength} 383 d.generate(dir, math.MaxInt32, false) 384 } 385 386 // Mode defines the type and amount of PoW verification an ethash engine makes. 387 type Mode uint 388 389 const ( 390 ModeNormal Mode = iota 391 ModeShared 392 ModeTest 393 ModeFake 394 ModeFullFake 395 ModeQuickTest 396 ModeCryptonight 397 ) 398 399 // Config are the configuration parameters of the ethash. 400 type Config struct { 401 CacheDir string 402 CachesInMem int 403 CachesOnDisk int 404 DatasetDir string 405 DatasetsInMem int 406 DatasetsOnDisk int 407 PowMode Mode 408 } 409 410 // sealTask wraps a seal block with relative result channel for remote sealer thread. 411 type sealTask struct { 412 block *types.Block 413 results chan<- *types.Block 414 } 415 416 // mineResult wraps the pow solution parameters for the specified block. 
type mineResult struct {
	nonce     types.BlockNonce // Nonce found by the remote sealer
	mixDigest common.Hash      // Mix digest accompanying the nonce
	hash      common.Hash      // Header hash the solution belongs to

	errc chan error // Channel the sealer loop reports acceptance/rejection on
}

// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
	id   common.Hash // Identifier of the remote sealer
	ping time.Time   // Time of the submission, used to expire stale rates
	rate uint64      // Reported hashes per second

	done chan struct{} // Closed once the submission has been processed
}

// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
	errc chan error     // Reports failure to produce a work package
	res  chan [4]string // Delivers the work package on success
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// Remote sealer related fields
	workCh       chan *sealTask   // Notification channel to push new work and relative result channel to remote sealer
	fetchWorkCh  chan *sealWork   // Channel used for remote sealer to fetch mining work
	submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
	fetchRateCh  chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
	submitRateCh chan *hashrate   // Channel used for remote sealer to submit their mining hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex      // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once       // Ensures exit channel will not be closed twice.
	exitCh    chan chan error // Notification channel to exiting backend threads
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
	// Sanitize the configuration: at least one in-memory cache is mandatory.
	if config.CachesInMem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	// All sealer channels are unbuffered: the remote loop below owns them.
	ethash := &Ethash{
		config:       config,
		caches:       newlru("cache", config.CachesInMem, newCache),
		datasets:     newlru("dataset", config.DatasetsInMem, newDataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeterForced(),
		workCh:       make(chan *sealTask),
		fetchWorkCh:  make(chan *sealWork),
		submitWorkCh: make(chan *mineResult),
		fetchRateCh:  make(chan chan uint64),
		submitRateCh: make(chan *hashrate),
		exitCh:       make(chan chan error),
	}
	// Background remote sealer; stopped via Close() through exitCh.
	go ethash.remote(notify, noverify)
	return ethash
}
scheme and starts a 503 // background thread for remote mining, also optionally notifying a 504 // batch of remote services of new work packages. 505 func NewQuickTest(notify []string, noverify bool) *Ethash { 506 ethash := &Ethash{ 507 config: Config{ 508 PowMode: ModeQuickTest, 509 }, 510 update: make(chan struct{}), 511 hashrate: metrics.NewMeter(), 512 workCh: make(chan *sealTask), 513 fetchWorkCh: make(chan *sealWork), 514 submitWorkCh: make(chan *mineResult), 515 fetchRateCh: make(chan chan uint64), 516 submitRateCh: make(chan *hashrate), 517 exitCh: make(chan chan error), 518 } 519 go ethash.remote(notify, noverify) 520 return ethash 521 } 522 523 // NewCryptonight creates a new cryptonight PoW scheme and starts a 524 // background thread for remote mining, also optionally notifying a 525 // batch of remote services of new work packages. 526 func NewCryptonight(notify []string, noverify bool) *Ethash { 527 ethash := &Ethash{ 528 config: Config{ 529 PowMode: ModeCryptonight, 530 }, 531 update: make(chan struct{}), 532 hashrate: metrics.NewMeter(), 533 workCh: make(chan *sealTask), 534 fetchWorkCh: make(chan *sealWork), 535 submitWorkCh: make(chan *mineResult), 536 fetchRateCh: make(chan chan uint64), 537 submitRateCh: make(chan *hashrate), 538 exitCh: make(chan chan error), 539 } 540 go ethash.remote(notify, noverify) 541 return ethash 542 } 543 544 // NewTester creates a small sized ethash PoW scheme useful only for testing 545 // purposes. 
546 func NewTester(notify []string, noverify bool) *Ethash { 547 ethash := &Ethash{ 548 config: Config{PowMode: ModeTest}, 549 caches: newlru("cache", 1, newCache), 550 datasets: newlru("dataset", 1, newDataset), 551 update: make(chan struct{}), 552 hashrate: metrics.NewMeterForced(), 553 workCh: make(chan *sealTask), 554 fetchWorkCh: make(chan *sealWork), 555 submitWorkCh: make(chan *mineResult), 556 fetchRateCh: make(chan chan uint64), 557 submitRateCh: make(chan *hashrate), 558 exitCh: make(chan chan error), 559 } 560 go ethash.remote(notify, noverify) 561 return ethash 562 } 563 564 // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts 565 // all blocks' seal as valid, though they still have to conform to the Ethereum 566 // consensus rules. 567 func NewFaker() *Ethash { 568 return &Ethash{ 569 config: Config{ 570 PowMode: ModeFake, 571 }, 572 } 573 } 574 575 // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that 576 // accepts all blocks as valid apart from the single one specified, though they 577 // still have to conform to the Ethereum consensus rules. 578 func NewFakeFailer(fail uint64) *Ethash { 579 return &Ethash{ 580 config: Config{ 581 PowMode: ModeFake, 582 }, 583 fakeFail: fail, 584 } 585 } 586 587 // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that 588 // accepts all blocks as valid, but delays verifications by some time, though 589 // they still have to conform to the Ethereum consensus rules. 590 func NewFakeDelayer(delay time.Duration) *Ethash { 591 return &Ethash{ 592 config: Config{ 593 PowMode: ModeFake, 594 }, 595 fakeDelay: delay, 596 } 597 } 598 599 // NewFullFaker creates an ethash consensus engine with a full fake scheme that 600 // accepts all blocks as valid, without checking any consensus rules whatsoever. 
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFullFake,
		},
	}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// Close closes the exit channel to notify all backend threads exiting.
//
// It performs a handshake with the remote sealer loop: send an error channel
// through exitCh, wait for the loop's shutdown result, then close exitCh.
// closeOnce makes repeated Close calls a no-op returning nil.
func (ethash *Ethash) Close() error {
	var err error
	ethash.closeOnce.Do(func() {
		// Short circuit if the exit channel is not allocated.
		if ethash.exitCh == nil {
			return
		}
		errc := make(chan error)
		ethash.exitCh <- errc
		err = <-errc
		close(ethash.exitCh)
	})
	return err
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
	epoch := block / epochLength
	currentI, futureI := ethash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation finish.
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	// Done on a background goroutine so the current request isn't blocked.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
	}
	return current
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
//
// If async is specified, not only the future but the current DAG is also
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
	// Retrieve the requested ethash dataset
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// If async is specified, generate everything in a background thread
	// (callers can poll current.generated() to see when it becomes usable).
	if async && !current.generated() {
		go func() {
			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

			// Pre-build the next epoch's DAG sequentially after the current one.
			if futureI != nil {
				future := futureI.(*dataset)
				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
			}
		}()
	} else {
		// Either blocking generation was requested, or already done
		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

		// The future DAG can always be built in the background.
		if futureI != nil {
			future := futureI.(*dataset)
			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
		}
	}
	return current
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	// (non-blocking send: if no sealer is listening, just drop the ping).
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes local hashrate, but also includes the total
// hashrate of all remote miner.
func (ethash *Ethash) Hashrate() float64 {
	// Only normal and test mode run the remote sealer loop; for every other
	// mode report just the locally metered rate.
	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
		return ethash.hashrate.Rate1()
	}
	var res = make(chan uint64, 1)

	// NOTE(review): receiving from exitCh here consumes a value that Close()
	// may have sent for the remote loop — presumably the loop has already
	// exited by then, but a concurrent Close/Hashrate race is worth confirming.
	select {
	case ethash.fetchRateCh <- res:
	case <-ethash.exitCh:
		// Return local hashrate only if ethash is stopped.
		return ethash.hashrate.Rate1()
	}

	// Gather total submitted hash rate of remote sealers.
	return ethash.hashrate.Rate1() + float64(<-res)
}

// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
	// In order to ensure backward compatibility, we exposes ethash RPC APIs
	// to both eth and ethash namespaces.
	return []rpc.API{
		{
			Namespace: "eth",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
		{
			Namespace: "ethash",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
	}
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}