github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/consensus/ethash/ethash.go (about) 1 // Copyright 2017 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package ethash implements the ethash proof-of-work consensus engine. 18 package ethash 19 20 import ( 21 "errors" 22 "fmt" 23 "math" 24 "math/big" 25 "math/rand" 26 "os" 27 "path/filepath" 28 "reflect" 29 "runtime" 30 "strconv" 31 "sync" 32 "sync/atomic" 33 "time" 34 "unsafe" 35 36 "github.com/edsrzf/mmap-go" 37 "github.com/ethereum/go-ethereum/consensus" 38 "github.com/ethereum/go-ethereum/log" 39 "github.com/ethereum/go-ethereum/metrics" 40 "github.com/ethereum/go-ethereum/rpc" 41 "github.com/hashicorp/golang-lru/simplelru" 42 ) 43 44 var ErrInvalidDumpMagic = errors.New("invalid dump magic") 45 46 var ( 47 // two256 is a big integer representing 2^256 48 two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) 49 50 // sharedEthash is a full instance that can be shared between multiple users. 51 sharedEthash *Ethash 52 53 // algorithmRevision is the data structure version used for file naming. 54 algorithmRevision = 23 55 56 // dumpMagic is a dataset dump header to sanity check a data dump. 
57 dumpMagic = []uint32{0xbaddcafe, 0xfee1dead} 58 ) 59 60 func init() { 61 sharedConfig := Config{ 62 PowMode: ModeNormal, 63 CachesInMem: 3, 64 DatasetsInMem: 1, 65 } 66 sharedEthash = New(sharedConfig, nil, false) 67 } 68 69 // isLittleEndian returns whether the local system is running in little or big 70 // endian byte order. 71 func isLittleEndian() bool { 72 n := uint32(0x01020304) 73 return *(*byte)(unsafe.Pointer(&n)) == 0x04 74 } 75 76 // memoryMap tries to memory map a file of uint32s for read only access. 77 func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) { 78 file, err := os.OpenFile(path, os.O_RDONLY, 0644) 79 if err != nil { 80 return nil, nil, nil, err 81 } 82 mem, buffer, err := memoryMapFile(file, false) 83 if err != nil { 84 file.Close() 85 return nil, nil, nil, err 86 } 87 for i, magic := range dumpMagic { 88 if buffer[i] != magic { 89 mem.Unmap() 90 file.Close() 91 return nil, nil, nil, ErrInvalidDumpMagic 92 } 93 } 94 if lock { 95 if err := mem.Lock(); err != nil { 96 mem.Unmap() 97 file.Close() 98 return nil, nil, nil, err 99 } 100 } 101 return file, mem, buffer[len(dumpMagic):], err 102 } 103 104 // memoryMapFile tries to memory map an already opened file descriptor. 105 func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) { 106 // Try to memory map the file 107 flag := mmap.RDONLY 108 if write { 109 flag = mmap.RDWR 110 } 111 mem, err := mmap.Map(file, flag, 0) 112 if err != nil { 113 return nil, nil, err 114 } 115 // The file is now memory-mapped. Create a []uint32 view of the file. 
116 var view []uint32 117 header := (*reflect.SliceHeader)(unsafe.Pointer(&view)) 118 header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&mem)).Data 119 header.Cap = len(mem) / 4 120 header.Len = header.Cap 121 return mem, view, nil 122 } 123 124 // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write 125 // access, fill it with the data from a generator and then move it into the final 126 // path requested. 127 func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { 128 // Ensure the data folder exists 129 if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 130 return nil, nil, nil, err 131 } 132 // Create a huge temporary empty file to fill with data 133 temp := path + "." + strconv.Itoa(rand.Int()) 134 135 dump, err := os.Create(temp) 136 if err != nil { 137 return nil, nil, nil, err 138 } 139 if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil { 140 dump.Close() 141 os.Remove(temp) 142 return nil, nil, nil, err 143 } 144 // Memory map the file for writing and fill it with the generator 145 mem, buffer, err := memoryMapFile(dump, true) 146 if err != nil { 147 dump.Close() 148 os.Remove(temp) 149 return nil, nil, nil, err 150 } 151 copy(buffer, dumpMagic) 152 153 data := buffer[len(dumpMagic):] 154 generator(data) 155 156 if err := mem.Unmap(); err != nil { 157 return nil, nil, nil, err 158 } 159 if err := dump.Close(); err != nil { 160 return nil, nil, nil, err 161 } 162 if err := os.Rename(temp, path); err != nil { 163 return nil, nil, nil, err 164 } 165 return memoryMap(path, lock) 166 } 167 168 // lru tracks caches or datasets by their last use time, keeping at most N of them. 169 type lru struct { 170 what string 171 new func(epoch uint64) interface{} 172 mu sync.Mutex 173 // Items are kept in a LRU cache, but there is a special case: 174 // We always keep an item for (highest seen epoch) + 1 as the 'future item'. 
175 cache *simplelru.LRU 176 future uint64 177 futureItem interface{} 178 } 179 180 // newlru create a new least-recently-used cache for either the verification caches 181 // or the mining datasets. 182 func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru { 183 if maxItems <= 0 { 184 maxItems = 1 185 } 186 cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) { 187 log.Trace("Evicted ethash "+what, "epoch", key) 188 }) 189 return &lru{what: what, new: new, cache: cache} 190 } 191 192 // get retrieves or creates an item for the given epoch. The first return value is always 193 // non-nil. The second return value is non-nil if lru thinks that an item will be useful in 194 // the near future. 195 func (lru *lru) get(epoch uint64) (item, future interface{}) { 196 lru.mu.Lock() 197 defer lru.mu.Unlock() 198 199 // Get or create the item for the requested epoch. 200 item, ok := lru.cache.Get(epoch) 201 if !ok { 202 if lru.future > 0 && lru.future == epoch { 203 item = lru.futureItem 204 } else { 205 log.Trace("Requiring new ethash "+lru.what, "epoch", epoch) 206 item = lru.new(epoch) 207 } 208 lru.cache.Add(epoch, item) 209 } 210 // Update the 'future item' if epoch is larger than previously seen. 211 if epoch < maxEpoch-1 && lru.future < epoch+1 { 212 log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1) 213 future = lru.new(epoch + 1) 214 lru.future = epoch + 1 215 lru.futureItem = future 216 } 217 return item, future 218 } 219 220 // cache wraps an ethash cache with some metadata to allow easier concurrent use. 
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache. The content is filled in lazily by
// (*cache).generate.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
//
// dir is the on-disk cache directory ("" keeps the cache purely in memory),
// limit is the number of recent epoch files to retain on disk, lock pins the
// memory mapping into RAM, and test shrinks the cache for unit testing.
// Safe for concurrent use: sync.Once guarantees a single generation, with
// later callers blocking until it completes.
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
	c.once.Do(func() {
		// size is in bytes; the cache slice holds size/4 uint32 words.
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be" // big-endian hosts get a distinct file flavour
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			// Fall back to a plain in-memory cache so callers still succeed.
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file. It is registered via
// runtime.SetFinalizer in generate, so mapped resources are reclaimed once
// the cache becomes unreachable.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this cache is relevant
	dump    *os.File  // File descriptor of the memory mapped cache
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual cache data content
	once    sync.Once // Ensures the cache is generated only once
	done    uint32    // Atomic flag to determine generation status
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use.
//
// dir is the on-disk DAG directory ("" keeps everything in memory), limit is
// the number of recent epoch files to retain on disk, lock pins the memory
// mapping into RAM, and test shrinks cache and dataset sizes for unit testing.
// Safe for concurrent use via sync.Once; the done flag is set on completion so
// generated() can report status without blocking.
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
	d.once.Do(func() {
		// Mark the dataset generated after we're done. This is needed for remote
		defer atomic.StoreUint32(&d.done, 1)

		// Sizes are in bytes; the slices hold size/4 uint32 words.
		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return
		if dir == "" {
			// The verification cache is needed as input to the DAG generator.
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be" // big-endian hosts get a distinct file flavour
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			// Fall back to a plain in-memory DAG so callers still succeed.
			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
	return atomic.LoadUint32(&d.done) == 1
}

// finalizer closes any file handlers and memory maps open. It is registered
// via runtime.SetFinalizer in generate.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
393 func MakeCache(block uint64, dir string) { 394 c := cache{epoch: block / epochLength} 395 c.generate(dir, math.MaxInt32, false, false) 396 } 397 398 // MakeDataset generates a new ethash dataset and optionally stores it to disk. 399 func MakeDataset(block uint64, dir string) { 400 d := dataset{epoch: block / epochLength} 401 d.generate(dir, math.MaxInt32, false, false) 402 } 403 404 // Mode defines the type and amount of PoW verification an ethash engine makes. 405 type Mode uint 406 407 const ( 408 ModeNormal Mode = iota 409 ModeShared 410 ModeTest 411 ModeFake 412 ModeFullFake 413 ) 414 415 // Config are the configuration parameters of the ethash. 416 type Config struct { 417 CacheDir string 418 CachesInMem int 419 CachesOnDisk int 420 CachesLockMmap bool 421 DatasetDir string 422 DatasetsInMem int 423 DatasetsOnDisk int 424 DatasetsLockMmap bool 425 PowMode Mode 426 427 // When set, notifications sent by the remote sealer will 428 // be block header JSON objects instead of work package arrays. 429 NotifyFull bool 430 431 Log log.Logger `toml:"-"` 432 } 433 434 // Ethash is a consensus engine based on proof-of-work implementing the ethash 435 // algorithm. 
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate
	remote   *remoteSealer // Remote sealer started by New (nil for faker engines)

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once  // Ensures exit channel will not be closed twice.
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
461 func New(config Config, notify []string, noverify bool) *Ethash { 462 if config.Log == nil { 463 config.Log = log.Root() 464 } 465 if config.CachesInMem <= 0 { 466 config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) 467 config.CachesInMem = 1 468 } 469 if config.CacheDir != "" && config.CachesOnDisk > 0 { 470 config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) 471 } 472 if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { 473 config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) 474 } 475 ethash := &Ethash{ 476 config: config, 477 caches: newlru("cache", config.CachesInMem, newCache), 478 datasets: newlru("dataset", config.DatasetsInMem, newDataset), 479 update: make(chan struct{}), 480 hashrate: metrics.NewMeterForced(), 481 } 482 if config.PowMode == ModeShared { 483 ethash.shared = sharedEthash 484 } 485 ethash.remote = startRemoteSealer(ethash, notify, noverify) 486 return ethash 487 } 488 489 // NewTester creates a small sized ethash PoW scheme useful only for testing 490 // purposes. 491 func NewTester(notify []string, noverify bool) *Ethash { 492 return New(Config{PowMode: ModeTest}, notify, noverify) 493 } 494 495 // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts 496 // all blocks' seal as valid, though they still have to conform to the Ethereum 497 // consensus rules. 498 func NewFaker() *Ethash { 499 return &Ethash{ 500 config: Config{ 501 PowMode: ModeFake, 502 Log: log.Root(), 503 }, 504 } 505 } 506 507 // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that 508 // accepts all blocks as valid apart from the single one specified, though they 509 // still have to conform to the Ethereum consensus rules. 
510 func NewFakeFailer(fail uint64) *Ethash { 511 return &Ethash{ 512 config: Config{ 513 PowMode: ModeFake, 514 Log: log.Root(), 515 }, 516 fakeFail: fail, 517 } 518 } 519 520 // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that 521 // accepts all blocks as valid, but delays verifications by some time, though 522 // they still have to conform to the Ethereum consensus rules. 523 func NewFakeDelayer(delay time.Duration) *Ethash { 524 return &Ethash{ 525 config: Config{ 526 PowMode: ModeFake, 527 Log: log.Root(), 528 }, 529 fakeDelay: delay, 530 } 531 } 532 533 // NewFullFaker creates an ethash consensus engine with a full fake scheme that 534 // accepts all blocks as valid, without checking any consensus rules whatsoever. 535 func NewFullFaker() *Ethash { 536 return &Ethash{ 537 config: Config{ 538 PowMode: ModeFullFake, 539 Log: log.Root(), 540 }, 541 } 542 } 543 544 // NewShared creates a full sized ethash PoW shared between all requesters running 545 // in the same process. 546 func NewShared() *Ethash { 547 return &Ethash{shared: sharedEthash} 548 } 549 550 // Close closes the exit channel to notify all backend threads exiting. 551 func (ethash *Ethash) Close() error { 552 return ethash.StopRemoteSealer() 553 } 554 555 // StopRemoteSealer stops the remote sealer 556 func (ethash *Ethash) StopRemoteSealer() error { 557 ethash.closeOnce.Do(func() { 558 // Short circuit if the exit channel is not allocated. 559 if ethash.remote == nil { 560 return 561 } 562 close(ethash.remote.requestExit) 563 <-ethash.remote.exitCh 564 }) 565 return nil 566 } 567 568 // cache tries to retrieve a verification cache for the specified block number 569 // by first checking against a list of in-memory caches, then against caches 570 // stored on disk, and finally generating one if none can be found. 
571 func (ethash *Ethash) cache(block uint64) *cache { 572 epoch := block / epochLength 573 currentI, futureI := ethash.caches.get(epoch) 574 current := currentI.(*cache) 575 576 // Wait for generation finish. 577 current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) 578 579 // If we need a new future cache, now's a good time to regenerate it. 580 if futureI != nil { 581 future := futureI.(*cache) 582 go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest) 583 } 584 return current 585 } 586 587 // dataset tries to retrieve a mining dataset for the specified block number 588 // by first checking against a list of in-memory datasets, then against DAGs 589 // stored on disk, and finally generating one if none can be found. 590 // 591 // If async is specified, not only the future but the current DAG is also 592 // generates on a background thread. 
593 func (ethash *Ethash) dataset(block uint64, async bool) *dataset { 594 // Retrieve the requested ethash dataset 595 epoch := block / epochLength 596 currentI, futureI := ethash.datasets.get(epoch) 597 current := currentI.(*dataset) 598 599 // If async is specified, generate everything in a background thread 600 if async && !current.generated() { 601 go func() { 602 current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 603 604 if futureI != nil { 605 future := futureI.(*dataset) 606 future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 607 } 608 }() 609 } else { 610 // Either blocking generation was requested, or already done 611 current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 612 613 if futureI != nil { 614 future := futureI.(*dataset) 615 go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest) 616 } 617 } 618 return current 619 } 620 621 // Threads returns the number of mining threads currently enabled. This doesn't 622 // necessarily mean that mining is running! 623 func (ethash *Ethash) Threads() int { 624 ethash.lock.Lock() 625 defer ethash.lock.Unlock() 626 627 return ethash.threads 628 } 629 630 // SetThreads updates the number of mining threads currently enabled. Calling 631 // this method does not start mining, only sets the thread count. If zero is 632 // specified, the miner will use all cores of the machine. Setting a thread 633 // count below zero is allowed and will cause the miner to idle, without any 634 // work being done. 
635 func (ethash *Ethash) SetThreads(threads int) { 636 ethash.lock.Lock() 637 defer ethash.lock.Unlock() 638 639 // If we're running a shared PoW, set the thread count on that instead 640 if ethash.shared != nil { 641 ethash.shared.SetThreads(threads) 642 return 643 } 644 // Update the threads and ping any running seal to pull in any changes 645 ethash.threads = threads 646 select { 647 case ethash.update <- struct{}{}: 648 default: 649 } 650 } 651 652 // Hashrate implements PoW, returning the measured rate of the search invocations 653 // per second over the last minute. 654 // Note the returned hashrate includes local hashrate, but also includes the total 655 // hashrate of all remote miner. 656 func (ethash *Ethash) Hashrate() float64 { 657 // Short circuit if we are run the ethash in normal/test mode. 658 if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest { 659 return ethash.hashrate.Rate1() 660 } 661 var res = make(chan uint64, 1) 662 663 select { 664 case ethash.remote.fetchRateCh <- res: 665 case <-ethash.remote.exitCh: 666 // Return local hashrate only if ethash is stopped. 667 return ethash.hashrate.Rate1() 668 } 669 670 // Gather total submitted hash rate of remote sealers. 671 return ethash.hashrate.Rate1() + float64(<-res) 672 } 673 674 // APIs implements consensus.Engine, returning the user facing RPC APIs. 675 func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { 676 // In order to ensure backward compatibility, we exposes ethash RPC APIs 677 // to both eth and ethash namespaces. 678 return []rpc.API{ 679 { 680 Namespace: "eth", 681 Version: "1.0", 682 Service: &API{ethash}, 683 Public: true, 684 }, 685 { 686 Namespace: "ethash", 687 Version: "1.0", 688 Service: &API{ethash}, 689 Public: true, 690 }, 691 } 692 } 693 694 // SeedHash is the seed to use for generating a verification cache and the mining 695 // dataset. 696 func SeedHash(block uint64) []byte { 697 return seedHash(block) 698 }