github.com/authcall/reference-optimistic-geth@v0.0.0-20220816224302-06313bfeb8d2/consensus/ethash/ethash.go (about) 1 // Copyright 2017 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package ethash implements the ethash proof-of-work consensus engine. 18 package ethash 19 20 import ( 21 "errors" 22 "fmt" 23 "math" 24 "math/big" 25 "math/rand" 26 "os" 27 "path/filepath" 28 "reflect" 29 "runtime" 30 "strconv" 31 "sync" 32 "sync/atomic" 33 "time" 34 "unsafe" 35 36 "github.com/edsrzf/mmap-go" 37 "github.com/ethereum/go-ethereum/consensus" 38 "github.com/ethereum/go-ethereum/log" 39 "github.com/ethereum/go-ethereum/metrics" 40 "github.com/ethereum/go-ethereum/rpc" 41 "github.com/hashicorp/golang-lru/simplelru" 42 ) 43 44 var ErrInvalidDumpMagic = errors.New("invalid dump magic") 45 46 var ( 47 // two256 is a big integer representing 2^256 48 two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) 49 50 // sharedEthash is a full instance that can be shared between multiple users. 51 sharedEthash *Ethash 52 53 // algorithmRevision is the data structure version used for file naming. 54 algorithmRevision = 23 55 56 // dumpMagic is a dataset dump header to sanity check a data dump. 
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// init builds the process-wide shared ethash instance: a ModeNormal engine
// keeping a few verification caches and one dataset in memory, which
// ModeShared engines delegate to so concurrent users do not each regenerate
// the same epoch data.
func init() {
	sharedConfig := Config{
		PowMode:       ModeNormal,
		CachesInMem:   3,
		DatasetsInMem: 1,
	}
	sharedEthash = New(sharedConfig, nil, false)
}

// isLittleEndian returns whether the local system is running in little or big
// endian byte order. It stores a known 32-bit pattern and checks which byte
// ends up first in memory.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
// It validates the dumpMagic prefix written by memoryMapAndGenerate and, on
// success, returns the open file, the raw mapping (needed for later unmapping)
// and a []uint32 view of the payload with the magic words stripped. If lock is
// true, the mapping is additionally locked into physical memory.
func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	// Reject files whose header does not match the expected magic words: they
	// are either corrupted or dumps of an incompatible format.
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	if lock {
		if err := mem.Lock(); err != nil {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, err
		}
	}
	return file, mem, buffer[len(dumpMagic):], err // err is necessarily nil here
}

// memoryMapFile tries to memory map an already opened file descriptor.
// The returned []uint32 aliases the mapped region (no copy is made), so it
// must not be used after the mapping has been unmapped.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// The file is now memory-mapped. Create a []uint32 view of the file by
	// pointing a slice header at the mapped bytes (4 bytes per element).
	var view []uint32
	header := (*reflect.SliceHeader)(unsafe.Pointer(&view))
	header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&mem)).Data
	header.Cap = len(mem) / 4
	header.Len = header.Cap
	return mem, view, nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested. Generating into a randomly named temp file and renaming at the
// end means a crash mid-generation never leaves a corrupt file at the final path.
func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	// Pre-size the file to magic prefix + payload before mapping it.
	if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
		dump.Close()
		os.Remove(temp)
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		os.Remove(temp)
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	// Unmap and close before the rename, then reopen read-only via memoryMap,
	// which also re-validates the magic words.
	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path, lock)
}

// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string                         // Human readable kind ("cache"/"dataset"), used in log messages
	new  func(epoch uint64) interface{} // Constructor invoked for epochs missing from the cache
	mu   sync.Mutex                     // Protects all fields below
	// Items are kept in a LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64
	futureItem interface{}
}

// newlru create a new least-recently-used cache for either the verification caches
// or the mining datasets. A non-positive maxItems is clamped to 1 so the cache can
// always hold at least the current item.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted ethash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}

// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			// The previously pre-built 'future item' is exactly the epoch
			// requested now; promote it instead of constructing a new one.
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	// Capped at maxEpoch-1 so the pre-built item never exceeds the supported range.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}

// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use. The sync.Once
// guard means concurrent callers block until the first invocation completes.
//
//	dir   - directory for on-disk caches; empty keeps everything in memory
//	limit - number of most recent epochs whose cache files are retained on disk
//	lock  - lock the memory mapping into RAM
//	test  - use a tiny (1KB) cache, for tests only
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be" // big-endian hosts get a distinct file suffix
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			// Mapping failed; fall back to a plain in-memory cache.
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s*", algorithmRevision, seed[:8], endian))
			files, _ := filepath.Glob(path) // find also the temp files that are generated.
			for _, file := range files {
				os.Remove(file)
			}
		}
	})
}

// finalizer unmaps the memory and closes the file. Registered via
// runtime.SetFinalizer in generate; also safe to invoke manually.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this cache is relevant
	dump    *os.File  // File descriptor of the memory mapped cache
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual cache data content
	once    sync.Once // Ensures the cache is generated only once
	done    uint32    // Atomic flag to determine generation status
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use. Parameters
// mirror (*cache).generate: dir selects on-disk storage, limit bounds the number
// of retained epoch files, lock pins the mapping into RAM and test shrinks the
// cache/dataset sizes for tests.
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
	d.once.Do(func() {
		// Mark the dataset generated after we're done. This is needed for
		// remote miners querying generation status via generated().
		defer atomic.StoreUint32(&d.done, 1)

		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return
		if dir == "" {
			// The dataset is derived from the verification cache, so a
			// throwaway cache has to be built first.
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be" // big-endian hosts get a distinct file suffix
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			// Mapping failed; fall back to a plain in-memory dataset.
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
	return atomic.LoadUint32(&d.done) == 1
}

// finalizer closes any file handlers and memory maps open. Registered via
// runtime.SetFinalizer in generate.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
396 func MakeCache(block uint64, dir string) { 397 c := cache{epoch: block / epochLength} 398 c.generate(dir, math.MaxInt32, false, false) 399 } 400 401 // MakeDataset generates a new ethash dataset and optionally stores it to disk. 402 func MakeDataset(block uint64, dir string) { 403 d := dataset{epoch: block / epochLength} 404 d.generate(dir, math.MaxInt32, false, false) 405 } 406 407 // Mode defines the type and amount of PoW verification an ethash engine makes. 408 type Mode uint 409 410 const ( 411 ModeNormal Mode = iota 412 ModeShared 413 ModeTest 414 ModeFake 415 ModeFullFake 416 ) 417 418 // Config are the configuration parameters of the ethash. 419 type Config struct { 420 CacheDir string 421 CachesInMem int 422 CachesOnDisk int 423 CachesLockMmap bool 424 DatasetDir string 425 DatasetsInMem int 426 DatasetsOnDisk int 427 DatasetsLockMmap bool 428 PowMode Mode 429 430 // When set, notifications sent by the remote sealer will 431 // be block header JSON objects instead of work package arrays. 432 NotifyFull bool 433 434 Log log.Logger `toml:"-"` 435 } 436 437 // Ethash is a consensus engine based on proof-of-work implementing the ethash 438 // algorithm. 
439 type Ethash struct { 440 config Config 441 442 caches *lru // In memory caches to avoid regenerating too often 443 datasets *lru // In memory datasets to avoid regenerating too often 444 445 // Mining related fields 446 rand *rand.Rand // Properly seeded random source for nonces 447 threads int // Number of threads to mine on if mining 448 update chan struct{} // Notification channel to update mining parameters 449 hashrate metrics.Meter // Meter tracking the average hashrate 450 remote *remoteSealer 451 452 // The fields below are hooks for testing 453 shared *Ethash // Shared PoW verifier to avoid cache regeneration 454 fakeFail uint64 // Block number which fails PoW check even in fake mode 455 fakeDelay time.Duration // Time delay to sleep for before returning from verify 456 457 lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields 458 closeOnce sync.Once // Ensures exit channel will not be closed twice. 459 } 460 461 // New creates a full sized ethash PoW scheme and starts a background thread for 462 // remote mining, also optionally notifying a batch of remote services of new work 463 // packages. 
464 func New(config Config, notify []string, noverify bool) *Ethash { 465 if config.Log == nil { 466 config.Log = log.Root() 467 } 468 if config.CachesInMem <= 0 { 469 config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) 470 config.CachesInMem = 1 471 } 472 if config.CacheDir != "" && config.CachesOnDisk > 0 { 473 config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) 474 } 475 if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { 476 config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) 477 } 478 ethash := &Ethash{ 479 config: config, 480 caches: newlru("cache", config.CachesInMem, newCache), 481 datasets: newlru("dataset", config.DatasetsInMem, newDataset), 482 update: make(chan struct{}), 483 hashrate: metrics.NewMeterForced(), 484 } 485 if config.PowMode == ModeShared { 486 ethash.shared = sharedEthash 487 } 488 ethash.remote = startRemoteSealer(ethash, notify, noverify) 489 return ethash 490 } 491 492 // NewTester creates a small sized ethash PoW scheme useful only for testing 493 // purposes. 494 func NewTester(notify []string, noverify bool) *Ethash { 495 return New(Config{PowMode: ModeTest}, notify, noverify) 496 } 497 498 // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts 499 // all blocks' seal as valid, though they still have to conform to the Ethereum 500 // consensus rules. 501 func NewFaker() *Ethash { 502 return &Ethash{ 503 config: Config{ 504 PowMode: ModeFake, 505 Log: log.Root(), 506 }, 507 } 508 } 509 510 // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that 511 // accepts all blocks as valid apart from the single one specified, though they 512 // still have to conform to the Ethereum consensus rules. 
513 func NewFakeFailer(fail uint64) *Ethash { 514 return &Ethash{ 515 config: Config{ 516 PowMode: ModeFake, 517 Log: log.Root(), 518 }, 519 fakeFail: fail, 520 } 521 } 522 523 // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that 524 // accepts all blocks as valid, but delays verifications by some time, though 525 // they still have to conform to the Ethereum consensus rules. 526 func NewFakeDelayer(delay time.Duration) *Ethash { 527 return &Ethash{ 528 config: Config{ 529 PowMode: ModeFake, 530 Log: log.Root(), 531 }, 532 fakeDelay: delay, 533 } 534 } 535 536 // NewFullFaker creates an ethash consensus engine with a full fake scheme that 537 // accepts all blocks as valid, without checking any consensus rules whatsoever. 538 func NewFullFaker() *Ethash { 539 return &Ethash{ 540 config: Config{ 541 PowMode: ModeFullFake, 542 Log: log.Root(), 543 }, 544 } 545 } 546 547 // NewShared creates a full sized ethash PoW shared between all requesters running 548 // in the same process. 549 func NewShared() *Ethash { 550 return &Ethash{shared: sharedEthash} 551 } 552 553 // Close closes the exit channel to notify all backend threads exiting. 554 func (ethash *Ethash) Close() error { 555 return ethash.StopRemoteSealer() 556 } 557 558 // StopRemoteSealer stops the remote sealer 559 func (ethash *Ethash) StopRemoteSealer() error { 560 ethash.closeOnce.Do(func() { 561 // Short circuit if the exit channel is not allocated. 562 if ethash.remote == nil { 563 return 564 } 565 close(ethash.remote.requestExit) 566 <-ethash.remote.exitCh 567 }) 568 return nil 569 } 570 571 // cache tries to retrieve a verification cache for the specified block number 572 // by first checking against a list of in-memory caches, then against caches 573 // stored on disk, and finally generating one if none can be found. 
574 func (ethash *Ethash) cache(block uint64) *cache { 575 epoch := block / epochLength 576 currentI, futureI := ethash.caches.get(epoch) 577 current := currentI.(*cache) 578 579 // Wait for generation finish. 580 current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) 581 582 // If we need a new future cache, now's a good time to regenerate it. 583 if futureI != nil { 584 future := futureI.(*cache) 585 go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) 586 } 587 return current 588 } 589 590 // dataset tries to retrieve a mining dataset for the specified block number 591 // by first checking against a list of in-memory datasets, then against DAGs 592 // stored on disk, and finally generating one if none can be found. 593 // 594 // If async is specified, not only the future but the current DAG is also 595 // generates on a background thread. 
596 func (ethash *Ethash) dataset(block uint64, async bool) *dataset { 597 // Retrieve the requested ethash dataset 598 epoch := block / epochLength 599 currentI, futureI := ethash.datasets.get(epoch) 600 current := currentI.(*dataset) 601 602 // If async is specified, generate everything in a background thread 603 if async && !current.generated() { 604 go func() { 605 current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 606 607 if futureI != nil { 608 future := futureI.(*dataset) 609 future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 610 } 611 }() 612 } else { 613 // Either blocking generation was requested, or already done 614 current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 615 616 if futureI != nil { 617 future := futureI.(*dataset) 618 go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 619 } 620 } 621 return current 622 } 623 624 // Threads returns the number of mining threads currently enabled. This doesn't 625 // necessarily mean that mining is running! 626 func (ethash *Ethash) Threads() int { 627 ethash.lock.Lock() 628 defer ethash.lock.Unlock() 629 630 return ethash.threads 631 } 632 633 // SetThreads updates the number of mining threads currently enabled. Calling 634 // this method does not start mining, only sets the thread count. If zero is 635 // specified, the miner will use all cores of the machine. Setting a thread 636 // count below zero is allowed and will cause the miner to idle, without any 637 // work being done. 
638 func (ethash *Ethash) SetThreads(threads int) { 639 ethash.lock.Lock() 640 defer ethash.lock.Unlock() 641 642 // If we're running a shared PoW, set the thread count on that instead 643 if ethash.shared != nil { 644 ethash.shared.SetThreads(threads) 645 return 646 } 647 // Update the threads and ping any running seal to pull in any changes 648 ethash.threads = threads 649 select { 650 case ethash.update <- struct{}{}: 651 default: 652 } 653 } 654 655 // Hashrate implements PoW, returning the measured rate of the search invocations 656 // per second over the last minute. 657 // Note the returned hashrate includes local hashrate, but also includes the total 658 // hashrate of all remote miner. 659 func (ethash *Ethash) Hashrate() float64 { 660 // Short circuit if we are run the ethash in normal/test mode. 661 if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest { 662 return ethash.hashrate.Rate1() 663 } 664 var res = make(chan uint64, 1) 665 666 select { 667 case ethash.remote.fetchRateCh <- res: 668 case <-ethash.remote.exitCh: 669 // Return local hashrate only if ethash is stopped. 670 return ethash.hashrate.Rate1() 671 } 672 673 // Gather total submitted hash rate of remote sealers. 674 return ethash.hashrate.Rate1() + float64(<-res) 675 } 676 677 // APIs implements consensus.Engine, returning the user facing RPC APIs. 678 func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { 679 // In order to ensure backward compatibility, we exposes ethash RPC APIs 680 // to both eth and ethash namespaces. 681 return []rpc.API{ 682 { 683 Namespace: "eth", 684 Service: &API{ethash}, 685 }, 686 { 687 Namespace: "ethash", 688 Service: &API{ethash}, 689 }, 690 } 691 } 692 693 // SeedHash is the seed to use for generating a verification cache and the mining 694 // dataset. 695 func SeedHash(block uint64) []byte { 696 return seedHash(block) 697 }