github.com/phillinzzz/newBsc@v1.1.6/consensus/ethash/ethash.go (about) 1 // Copyright 2017 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package ethash implements the ethash proof-of-work consensus engine. 18 package ethash 19 20 import ( 21 "errors" 22 "fmt" 23 "math" 24 "math/big" 25 "math/rand" 26 "os" 27 "path/filepath" 28 "reflect" 29 "runtime" 30 "strconv" 31 "sync" 32 "sync/atomic" 33 "time" 34 "unsafe" 35 36 "github.com/edsrzf/mmap-go" 37 "github.com/phillinzzz/newBsc/common/gopool" 38 "github.com/phillinzzz/newBsc/consensus" 39 "github.com/phillinzzz/newBsc/log" 40 "github.com/phillinzzz/newBsc/metrics" 41 "github.com/phillinzzz/newBsc/rpc" 42 "github.com/hashicorp/golang-lru/simplelru" 43 ) 44 45 var ErrInvalidDumpMagic = errors.New("invalid dump magic") 46 47 var ( 48 // two256 is a big integer representing 2^256 49 two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) 50 51 // sharedEthash is a full instance that can be shared between multiple users. 52 sharedEthash *Ethash 53 54 // algorithmRevision is the data structure version used for file naming. 55 algorithmRevision = 23 56 57 // dumpMagic is a dataset dump header to sanity check a data dump. 
58 dumpMagic = []uint32{0xbaddcafe, 0xfee1dead} 59 ) 60 61 func init() { 62 sharedConfig := Config{ 63 PowMode: ModeNormal, 64 CachesInMem: 3, 65 DatasetsInMem: 1, 66 } 67 sharedEthash = New(sharedConfig, nil, false) 68 } 69 70 // isLittleEndian returns whether the local system is running in little or big 71 // endian byte order. 72 func isLittleEndian() bool { 73 n := uint32(0x01020304) 74 return *(*byte)(unsafe.Pointer(&n)) == 0x04 75 } 76 77 // memoryMap tries to memory map a file of uint32s for read only access. 78 func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) { 79 file, err := os.OpenFile(path, os.O_RDONLY, 0644) 80 if err != nil { 81 return nil, nil, nil, err 82 } 83 mem, buffer, err := memoryMapFile(file, false) 84 if err != nil { 85 file.Close() 86 return nil, nil, nil, err 87 } 88 for i, magic := range dumpMagic { 89 if buffer[i] != magic { 90 mem.Unmap() 91 file.Close() 92 return nil, nil, nil, ErrInvalidDumpMagic 93 } 94 } 95 if lock { 96 if err := mem.Lock(); err != nil { 97 mem.Unmap() 98 file.Close() 99 return nil, nil, nil, err 100 } 101 } 102 return file, mem, buffer[len(dumpMagic):], err 103 } 104 105 // memoryMapFile tries to memory map an already opened file descriptor. 106 func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) { 107 // Try to memory map the file 108 flag := mmap.RDONLY 109 if write { 110 flag = mmap.RDWR 111 } 112 mem, err := mmap.Map(file, flag, 0) 113 if err != nil { 114 return nil, nil, err 115 } 116 // The file is now memory-mapped. Create a []uint32 view of the file. 
117 var view []uint32 118 header := (*reflect.SliceHeader)(unsafe.Pointer(&view)) 119 header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&mem)).Data 120 header.Cap = len(mem) / 4 121 header.Len = header.Cap 122 return mem, view, nil 123 } 124 125 // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write 126 // access, fill it with the data from a generator and then move it into the final 127 // path requested. 128 func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { 129 // Ensure the data folder exists 130 if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 131 return nil, nil, nil, err 132 } 133 // Create a huge temporary empty file to fill with data 134 temp := path + "." + strconv.Itoa(rand.Int()) 135 136 dump, err := os.Create(temp) 137 if err != nil { 138 return nil, nil, nil, err 139 } 140 if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { 141 return nil, nil, nil, err 142 } 143 // Memory map the file for writing and fill it with the generator 144 mem, buffer, err := memoryMapFile(dump, true) 145 if err != nil { 146 dump.Close() 147 return nil, nil, nil, err 148 } 149 copy(buffer, dumpMagic) 150 151 data := buffer[len(dumpMagic):] 152 generator(data) 153 154 if err := mem.Unmap(); err != nil { 155 return nil, nil, nil, err 156 } 157 if err := dump.Close(); err != nil { 158 return nil, nil, nil, err 159 } 160 if err := os.Rename(temp, path); err != nil { 161 return nil, nil, nil, err 162 } 163 return memoryMap(path, lock) 164 } 165 166 // lru tracks caches or datasets by their last use time, keeping at most N of them. 167 type lru struct { 168 what string 169 new func(epoch uint64) interface{} 170 mu sync.Mutex 171 // Items are kept in a LRU cache, but there is a special case: 172 // We always keep an item for (highest seen epoch) + 1 as the 'future item'. 
173 cache *simplelru.LRU 174 future uint64 175 futureItem interface{} 176 } 177 178 // newlru create a new least-recently-used cache for either the verification caches 179 // or the mining datasets. 180 func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru { 181 if maxItems <= 0 { 182 maxItems = 1 183 } 184 cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) { 185 log.Trace("Evicted ethash "+what, "epoch", key) 186 }) 187 return &lru{what: what, new: new, cache: cache} 188 } 189 190 // get retrieves or creates an item for the given epoch. The first return value is always 191 // non-nil. The second return value is non-nil if lru thinks that an item will be useful in 192 // the near future. 193 func (lru *lru) get(epoch uint64) (item, future interface{}) { 194 lru.mu.Lock() 195 defer lru.mu.Unlock() 196 197 // Get or create the item for the requested epoch. 198 item, ok := lru.cache.Get(epoch) 199 if !ok { 200 if lru.future > 0 && lru.future == epoch { 201 item = lru.futureItem 202 } else { 203 log.Trace("Requiring new ethash "+lru.what, "epoch", epoch) 204 item = lru.new(epoch) 205 } 206 lru.cache.Add(epoch, item) 207 } 208 // Update the 'future item' if epoch is larger than previously seen. 209 if epoch < maxEpoch-1 && lru.future < epoch+1 { 210 log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1) 211 future = lru.new(epoch + 1) 212 lru.future = epoch + 1 213 lru.futureItem = future 214 } 215 return item, future 216 } 217 218 // cache wraps an ethash cache with some metadata to allow easier concurrent use. 
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
//
//	dir:   directory to persist the cache in; "" keeps it in memory only
//	limit: number of recent epochs whose dumps are kept on disk; older ones
//	       are deleted after generation
//	lock:  whether to mlock the mapping into RAM
//	test:  use a tiny 1KiB cache for testing
//
// Safe for concurrent callers: sync.Once guarantees a single generation.
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			// Fall back to a plain in-memory cache if disk generation failed.
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this cache is relevant
	dump    *os.File  // File descriptor of the memory mapped cache
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual cache data content
	once    sync.Once // Ensures the cache is generated only once
	done    uint32    // Atomic flag to determine generation status
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
306 func newDataset(epoch uint64) interface{} { 307 return &dataset{epoch: epoch} 308 } 309 310 // generate ensures that the dataset content is generated before use. 311 func (d *dataset) generate(dir string, limit int, lock bool, test bool) { 312 d.once.Do(func() { 313 // Mark the dataset generated after we're done. This is needed for remote 314 defer atomic.StoreUint32(&d.done, 1) 315 316 csize := cacheSize(d.epoch*epochLength + 1) 317 dsize := datasetSize(d.epoch*epochLength + 1) 318 seed := seedHash(d.epoch*epochLength + 1) 319 if test { 320 csize = 1024 321 dsize = 32 * 1024 322 } 323 // If we don't store anything on disk, generate and return 324 if dir == "" { 325 cache := make([]uint32, csize/4) 326 generateCache(cache, d.epoch, seed) 327 328 d.dataset = make([]uint32, dsize/4) 329 generateDataset(d.dataset, d.epoch, cache) 330 331 return 332 } 333 // Disk storage is needed, this will get fancy 334 var endian string 335 if !isLittleEndian() { 336 endian = ".be" 337 } 338 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 339 logger := log.New("epoch", d.epoch) 340 341 // We're about to mmap the file, ensure that the mapping is cleaned up when the 342 // cache becomes unused. 
343 runtime.SetFinalizer(d, (*dataset).finalizer) 344 345 // Try to load the file from disk and memory map it 346 var err error 347 d.dump, d.mmap, d.dataset, err = memoryMap(path, lock) 348 if err == nil { 349 logger.Debug("Loaded old ethash dataset from disk") 350 return 351 } 352 logger.Debug("Failed to load old ethash dataset", "err", err) 353 354 // No previous dataset available, create a new dataset file to fill 355 cache := make([]uint32, csize/4) 356 generateCache(cache, d.epoch, seed) 357 358 d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) 359 if err != nil { 360 logger.Error("Failed to generate mapped ethash dataset", "err", err) 361 362 d.dataset = make([]uint32, dsize/2) 363 generateDataset(d.dataset, d.epoch, cache) 364 } 365 // Iterate over all previous instances and delete old ones 366 for ep := int(d.epoch) - limit; ep >= 0; ep-- { 367 seed := seedHash(uint64(ep)*epochLength + 1) 368 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 369 os.Remove(path) 370 } 371 }) 372 } 373 374 // generated returns whether this particular dataset finished generating already 375 // or not (it may not have been started at all). This is useful for remote miners 376 // to default to verification caches instead of blocking on DAG generations. 377 func (d *dataset) generated() bool { 378 return atomic.LoadUint32(&d.done) == 1 379 } 380 381 // finalizer closes any file handlers and memory maps open. 382 func (d *dataset) finalizer() { 383 if d.mmap != nil { 384 d.mmap.Unmap() 385 d.dump.Close() 386 d.mmap, d.dump = nil, nil 387 } 388 } 389 390 // MakeCache generates a new ethash cache and optionally stores it to disk. 
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false, false)
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false, false)
}

// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal   Mode = iota // Full verification; the zero value of Mode
	ModeShared               // Reuses the process-wide shared ethash instance
	ModeTest                 // Tiny caches/datasets for testing
	ModeFake                 // Accepts all seals as valid (consensus rules still checked)
	ModeFullFake             // Accepts all blocks without any checks at all
)

// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir         string // Directory for on-disk verification caches ("" disables)
	CachesInMem      int    // Number of verification caches kept in memory
	CachesOnDisk     int    // Number of verification caches kept on disk
	CachesLockMmap   bool   // mlock mmapped caches into RAM
	DatasetDir       string // Directory for on-disk mining DAGs ("" disables)
	DatasetsInMem    int    // Number of mining datasets kept in memory
	DatasetsOnDisk   int    // Number of mining datasets kept on disk
	DatasetsLockMmap bool   // mlock mmapped datasets into RAM
	PowMode          Mode   // Verification mode (see Mode constants)

	// When set, notifications sent by the remote sealer will
	// be block header JSON objects instead of work package arrays.
	NotifyFull bool

	Log log.Logger `toml:"-"`
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate
	remote   *remoteSealer // Background remote sealer; nil for the fake/shared constructors

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once  // Ensures exit channel will not be closed twice.
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
	if config.Log == nil {
		config.Log = log.Root()
	}
	if config.CachesInMem <= 0 {
		config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	ethash := &Ethash{
		config:   config,
		caches:   newlru("cache", config.CachesInMem, newCache),
		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeterForced(),
	}
	if config.PowMode == ModeShared {
		ethash.shared = sharedEthash
	}
	ethash.remote = startRemoteSealer(ethash, notify, noverify)
	return ethash
}

// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
	return New(Config{PowMode: ModeTest}, notify, noverify)
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
	}
}

// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeFail: fail,
	}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeDelay: delay,
	}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFullFake,
			Log:     log.Root(),
		},
	}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// Close closes the exit channel to notify all backend threads exiting.
// It is idempotent: repeated calls are no-ops thanks to closeOnce.
func (ethash *Ethash) Close() error {
	ethash.closeOnce.Do(func() {
		// Short circuit if the exit channel is not allocated.
		if ethash.remote == nil {
			return
		}
		close(ethash.remote.requestExit)
		<-ethash.remote.exitCh
	})
	return nil
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
	epoch := block / epochLength
	currentI, futureI := ethash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation finish.
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
	}
	return current
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
//
// If async is specified, not only the future but the current DAG is also
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
	// Retrieve the requested ethash dataset
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// If async is specified, generate everything in a background thread
	if async && !current.generated() {
		gopool.Submit(func() {
			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

			if futureI != nil {
				future := futureI.(*dataset)
				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
			}
		})
	} else {
		// Either blocking generation was requested, or already done
		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

		if futureI != nil {
			future := futureI.(*dataset)
			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
		}
	}
	return current
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default: // non-blocking: no sealer is listening right now
	}
}

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes local hashrate, but also includes the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
	// Short circuit if we run ethash in a mode other than normal/test.
	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
		return ethash.hashrate.Rate1()
	}
	// NOTE(review): ethash.remote is nil for instances built without New
	// (e.g. NewShared) — presumably such instances never reach here; verify
	// against callers.
	var res = make(chan uint64, 1)

	select {
	case ethash.remote.fetchRateCh <- res:
	case <-ethash.remote.exitCh:
		// Return local hashrate only if ethash is stopped.
		return ethash.hashrate.Rate1()
	}

	// Gather total submitted hash rate of remote sealers.
	return ethash.hashrate.Rate1() + float64(<-res)
}

// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
	// In order to ensure backward compatibility, we expose ethash RPC APIs
	// to both eth and ethash namespaces.
	return []rpc.API{
		{
			Namespace: "eth",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
		{
			Namespace: "ethash",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
	}
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}