// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"sync"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/hashicorp/golang-lru/simplelru"
)

// ErrInvalidDumpMagic is returned when a memory mapped cache/dataset file does
// not start with the expected dumpMagic header words.
var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256 (the Exp call uses a zero
	// modulus, so the result is 2^256 itself, not 2^256-1).
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	// The positional Config fields are: CacheDir, CachesInMem, CachesOnDisk,
	// DatasetDir, DatasetsInMem, DatasetsOnDisk, PowMode.
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	// On a little endian machine the least significant byte (0x04) sits at the
	// lowest address, which is what the pointer cast reads.
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
// It returns the open file, the raw mapping and a uint32 view of the data
// with the dumpMagic header stripped off.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	// Sanity check the magic header to reject truncated or foreign dumps.
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}

// memoryMapFile tries to memory map an already opened file descriptor.
// The returned []uint32 aliases the mapping's memory and must not be used
// after the mapping has been unmapped.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	// Reinterpret the byte slice as a uint32 slice by rewriting a copy of the
	// slice header in place (length/capacity divided by the 4-byte element size).
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
111 func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { 112 // Ensure the data folder exists 113 if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 114 return nil, nil, nil, err 115 } 116 // Create a huge temporary empty file to fill with data 117 temp := path + "." + strconv.Itoa(rand.Int()) 118 119 dump, err := os.Create(temp) 120 if err != nil { 121 return nil, nil, nil, err 122 } 123 if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { 124 return nil, nil, nil, err 125 } 126 // Memory map the file for writing and fill it with the generator 127 mem, buffer, err := memoryMapFile(dump, true) 128 if err != nil { 129 dump.Close() 130 return nil, nil, nil, err 131 } 132 copy(buffer, dumpMagic) 133 134 data := buffer[len(dumpMagic):] 135 generator(data) 136 137 if err := mem.Unmap(); err != nil { 138 return nil, nil, nil, err 139 } 140 if err := dump.Close(); err != nil { 141 return nil, nil, nil, err 142 } 143 if err := os.Rename(temp, path); err != nil { 144 return nil, nil, nil, err 145 } 146 return memoryMap(path) 147 } 148 149 // lru tracks caches or datasets by their last use time, keeping at most N of them. 150 type lru struct { 151 what string 152 new func(epoch uint64) interface{} 153 mu sync.Mutex 154 // Items are kept in a LRU cache, but there is a special case: 155 // We always keep an item for (highest seen epoch) + 1 as the 'future item'. 156 cache *simplelru.LRU 157 future uint64 158 futureItem interface{} 159 } 160 161 // newlru create a new least-recently-used cache for either the verification caches 162 // or the mining datasets. 
163 func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru { 164 if maxItems <= 0 { 165 maxItems = 1 166 } 167 cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) { 168 log.Trace("Evicted ethash "+what, "epoch", key) 169 }) 170 return &lru{what: what, new: new, cache: cache} 171 } 172 173 // get retrieves or creates an item for the given epoch. The first return value is always 174 // non-nil. The second return value is non-nil if lru thinks that an item will be useful in 175 // the near future. 176 func (lru *lru) get(epoch uint64) (item, future interface{}) { 177 lru.mu.Lock() 178 defer lru.mu.Unlock() 179 180 // Get or create the item for the requested epoch. 181 item, ok := lru.cache.Get(epoch) 182 if !ok { 183 if lru.future > 0 && lru.future == epoch { 184 item = lru.futureItem 185 } else { 186 log.Trace("Requiring new ethash "+lru.what, "epoch", epoch) 187 item = lru.new(epoch) 188 } 189 lru.cache.Add(epoch, item) 190 } 191 // Update the 'future item' if epoch is larger than previously seen. 192 if epoch < maxEpoch-1 && lru.future < epoch+1 { 193 log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1) 194 future = lru.new(epoch + 1) 195 lru.future = epoch + 1 196 lru.futureItem = future 197 } 198 return item, future 199 } 200 201 // cache wraps an ethash cache with some metadata to allow easier concurrent use. 202 type cache struct { 203 epoch uint64 // Epoch for which this cache is relevant 204 dump *os.File // File descriptor of the memory mapped cache 205 mmap mmap.MMap // Memory map itself to unmap before releasing 206 cache []uint32 // The actual cache data content (may be memory mapped) 207 once sync.Once // Ensures the cache is generated only once 208 } 209 210 // newCache creates a new ethash verification cache and returns it as a plain Go 211 // interface to be usable in an LRU cache. 
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
// dir is the on-disk cache directory ("" keeps the cache in memory only),
// limit bounds how many older epochs' cache files are kept on disk, and
// test shrinks the cache to 1KiB for testing.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			// size is in bytes and the elements are uint32s, hence size/4.
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be" // big endian hosts get a distinct file suffix
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			// Mapping failed (e.g. disk full): fall back to plain in-memory generation.
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and
closes the file. 269 func (c *cache) finalizer() { 270 if c.mmap != nil { 271 c.mmap.Unmap() 272 c.dump.Close() 273 c.mmap, c.dump = nil, nil 274 } 275 } 276 277 // dataset wraps an ethash dataset with some metadata to allow easier concurrent use. 278 type dataset struct { 279 epoch uint64 // Epoch for which this cache is relevant 280 dump *os.File // File descriptor of the memory mapped cache 281 mmap mmap.MMap // Memory map itself to unmap before releasing 282 dataset []uint32 // The actual cache data content 283 once sync.Once // Ensures the cache is generated only once 284 } 285 286 // newDataset creates a new ethash mining dataset and returns it as a plain Go 287 // interface to be usable in an LRU cache. 288 func newDataset(epoch uint64) interface{} { 289 return &dataset{epoch: epoch} 290 } 291 292 // generate ensures that the dataset content is generated before use. 293 func (d *dataset) generate(dir string, limit int, test bool) { 294 d.once.Do(func() { 295 csize := cacheSize(d.epoch*epochLength + 1) 296 dsize := datasetSize(d.epoch*epochLength + 1) 297 seed := seedHash(d.epoch*epochLength + 1) 298 if test { 299 csize = 1024 300 dsize = 32 * 1024 301 } 302 // If we don't store anything on disk, generate and return 303 if dir == "" { 304 cache := make([]uint32, csize/4) 305 generateCache(cache, d.epoch, seed) 306 307 d.dataset = make([]uint32, dsize/4) 308 generateDataset(d.dataset, d.epoch, cache) 309 } 310 // Disk storage is needed, this will get fancy 311 var endian string 312 if !isLittleEndian() { 313 endian = ".be" 314 } 315 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 316 logger := log.New("epoch", d.epoch) 317 318 // We're about to mmap the file, ensure that the mapping is cleaned up when the 319 // cache becomes unused. 
320 runtime.SetFinalizer(d, (*dataset).finalizer) 321 322 // Try to load the file from disk and memory map it 323 var err error 324 d.dump, d.mmap, d.dataset, err = memoryMap(path) 325 if err == nil { 326 logger.Debug("Loaded old ethash dataset from disk") 327 return 328 } 329 logger.Debug("Failed to load old ethash dataset", "err", err) 330 331 // No previous dataset available, create a new dataset file to fill 332 cache := make([]uint32, csize/4) 333 generateCache(cache, d.epoch, seed) 334 335 d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) 336 if err != nil { 337 logger.Error("Failed to generate mapped ethash dataset", "err", err) 338 339 d.dataset = make([]uint32, dsize/2) 340 generateDataset(d.dataset, d.epoch, cache) 341 } 342 // Iterate over all previous instances and delete old ones 343 for ep := int(d.epoch) - limit; ep >= 0; ep-- { 344 seed := seedHash(uint64(ep)*epochLength + 1) 345 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 346 os.Remove(path) 347 } 348 }) 349 } 350 351 // finalizer closes any file handlers and memory maps open. 352 func (d *dataset) finalizer() { 353 if d.mmap != nil { 354 d.mmap.Unmap() 355 d.dump.Close() 356 d.mmap, d.dump = nil, nil 357 } 358 } 359 360 // MakeCache generates a new ethash cache and optionally stores it to disk. 361 func MakeCache(block uint64, dir string) { 362 c := cache{epoch: block / epochLength} 363 c.generate(dir, math.MaxInt32, false) 364 } 365 366 // MakeDataset generates a new ethash dataset and optionally stores it to disk. 367 func MakeDataset(block uint64, dir string) { 368 d := dataset{epoch: block / epochLength} 369 d.generate(dir, math.MaxInt32, false) 370 } 371 372 // Mode defines the type and amount of PoW verification an ethash engine makes. 
type Mode uint

// Supported verification modes. ModeTest shrinks the generated caches and
// datasets (see cache/dataset generate); ModeFake and ModeFullFake are used
// by the testing constructors below.
const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// Config are the configuration parameters of the ethash.
// Note: field order matters — sharedEthash constructs this with a positional literal.
type Config struct {
	CacheDir       string // Directory to store verification caches in ("" = in-memory only)
	CachesInMem    int    // Number of recent caches to keep in memory
	CachesOnDisk   int    // Number of recent caches to keep on disk
	DatasetDir     string // Directory to store mining datasets in ("" = in-memory only)
	DatasetsInMem  int    // Number of recent datasets to keep in memory
	DatasetsOnDisk int    // Number of recent datasets to keep on disk
	PowMode        Mode   // Type and amount of PoW verification to perform
}

// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
	nonce     types.BlockNonce
	mixDigest common.Hash
	hash      common.Hash

	errc chan error // Channel the submission outcome is reported back on
}

// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
	id   common.Hash
	ping time.Time
	rate uint64

	done chan struct{} // Closed once the submission has been processed
}

// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
	errc chan error
	res  chan [3]string
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// Remote sealer related fields
	workCh       chan *types.Block // Notification channel to push new work to remote sealer
	resultCh     chan *types.Block // Channel used by mining threads to return result
	fetchWorkCh  chan *sealWork    // Channel used for remote sealer to fetch mining work
	submitWorkCh chan *mineResult  // Channel used for remote sealer to submit their mining result
	fetchRateCh  chan chan uint64  // Channel used to gather submitted hash rate for local or remote sealer.
	submitRateCh chan *hashrate    // Channel used for remote sealer to submit their mining hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex      // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once       // Ensures exit channel will not be closed twice.
	exitCh    chan chan error // Notification channel to exiting backend threads
}

// New creates a full sized ethash PoW scheme and starts a background thread for remote mining.
func New(config Config) *Ethash {
	// At least one cache must live in memory for verification to work at all.
	if config.CachesInMem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	ethash := &Ethash{
		config:       config,
		caches:       newlru("cache", config.CachesInMem, newCache),
		datasets:     newlru("dataset", config.DatasetsInMem, newDataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeter(),
		workCh:       make(chan *types.Block),
		resultCh:     make(chan *types.Block),
		fetchWorkCh:  make(chan *sealWork),
		submitWorkCh: make(chan *mineResult),
		fetchRateCh:  make(chan chan uint64),
		submitRateCh: make(chan *hashrate),
		exitCh:       make(chan chan error),
	}
	// Spin up the remote sealer backend; it is stopped via Close/exitCh.
	go ethash.remote()
	return ethash
}

// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
482 func NewTester() *Ethash { 483 ethash := &Ethash{ 484 config: Config{PowMode: ModeTest}, 485 caches: newlru("cache", 1, newCache), 486 datasets: newlru("dataset", 1, newDataset), 487 update: make(chan struct{}), 488 hashrate: metrics.NewMeter(), 489 workCh: make(chan *types.Block), 490 resultCh: make(chan *types.Block), 491 fetchWorkCh: make(chan *sealWork), 492 submitWorkCh: make(chan *mineResult), 493 fetchRateCh: make(chan chan uint64), 494 submitRateCh: make(chan *hashrate), 495 exitCh: make(chan chan error), 496 } 497 go ethash.remote() 498 return ethash 499 } 500 501 // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts 502 // all blocks' seal as valid, though they still have to conform to the Ethereum 503 // consensus rules. 504 func NewFaker() *Ethash { 505 return &Ethash{ 506 config: Config{ 507 PowMode: ModeFake, 508 }, 509 } 510 } 511 512 // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that 513 // accepts all blocks as valid apart from the single one specified, though they 514 // still have to conform to the Ethereum consensus rules. 515 func NewFakeFailer(fail uint64) *Ethash { 516 return &Ethash{ 517 config: Config{ 518 PowMode: ModeFake, 519 }, 520 fakeFail: fail, 521 } 522 } 523 524 // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that 525 // accepts all blocks as valid, but delays verifications by some time, though 526 // they still have to conform to the Ethereum consensus rules. 527 func NewFakeDelayer(delay time.Duration) *Ethash { 528 return &Ethash{ 529 config: Config{ 530 PowMode: ModeFake, 531 }, 532 fakeDelay: delay, 533 } 534 } 535 536 // NewFullFaker creates an ethash consensus engine with a full fake scheme that 537 // accepts all blocks as valid, without checking any consensus rules whatsoever. 
538 func NewFullFaker() *Ethash { 539 return &Ethash{ 540 config: Config{ 541 PowMode: ModeFullFake, 542 }, 543 } 544 } 545 546 // NewShared creates a full sized ethash PoW shared between all requesters running 547 // in the same process. 548 func NewShared() *Ethash { 549 return &Ethash{shared: sharedEthash} 550 } 551 552 // Close closes the exit channel to notify all backend threads exiting. 553 func (ethash *Ethash) Close() error { 554 var err error 555 ethash.closeOnce.Do(func() { 556 // Short circuit if the exit channel is not allocated. 557 if ethash.exitCh == nil { 558 return 559 } 560 errc := make(chan error) 561 ethash.exitCh <- errc 562 err = <-errc 563 close(ethash.exitCh) 564 }) 565 return err 566 } 567 568 // cache tries to retrieve a verification cache for the specified block number 569 // by first checking against a list of in-memory caches, then against caches 570 // stored on disk, and finally generating one if none can be found. 571 func (ethash *Ethash) cache(block uint64) *cache { 572 epoch := block / epochLength 573 currentI, futureI := ethash.caches.get(epoch) 574 current := currentI.(*cache) 575 576 // Wait for generation finish. 577 current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) 578 579 // If we need a new future cache, now's a good time to regenerate it. 580 if futureI != nil { 581 future := futureI.(*cache) 582 go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) 583 } 584 return current 585 } 586 587 // dataset tries to retrieve a mining dataset for the specified block number 588 // by first checking against a list of in-memory datasets, then against DAGs 589 // stored on disk, and finally generating one if none can be found. 
590 func (ethash *Ethash) dataset(block uint64) *dataset { 591 epoch := block / epochLength 592 currentI, futureI := ethash.datasets.get(epoch) 593 current := currentI.(*dataset) 594 595 // Wait for generation finish. 596 current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) 597 598 // If we need a new future dataset, now's a good time to regenerate it. 599 if futureI != nil { 600 future := futureI.(*dataset) 601 go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) 602 } 603 604 return current 605 } 606 607 // Threads returns the number of mining threads currently enabled. This doesn't 608 // necessarily mean that mining is running! 609 func (ethash *Ethash) Threads() int { 610 ethash.lock.Lock() 611 defer ethash.lock.Unlock() 612 613 return ethash.threads 614 } 615 616 // SetThreads updates the number of mining threads currently enabled. Calling 617 // this method does not start mining, only sets the thread count. If zero is 618 // specified, the miner will use all cores of the machine. Setting a thread 619 // count below zero is allowed and will cause the miner to idle, without any 620 // work being done. 621 func (ethash *Ethash) SetThreads(threads int) { 622 ethash.lock.Lock() 623 defer ethash.lock.Unlock() 624 625 // If we're running a shared PoW, set the thread count on that instead 626 if ethash.shared != nil { 627 ethash.shared.SetThreads(threads) 628 return 629 } 630 // Update the threads and ping any running seal to pull in any changes 631 ethash.threads = threads 632 select { 633 case ethash.update <- struct{}{}: 634 default: 635 } 636 } 637 638 // Hashrate implements PoW, returning the measured rate of the search invocations 639 // per second over the last minute. 640 // Note the returned hashrate includes local hashrate, but also includes the total 641 // hashrate of all remote miner. 
642 func (ethash *Ethash) Hashrate() float64 { 643 // Short circuit if we are run the ethash in normal/test mode. 644 if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest { 645 return ethash.hashrate.Rate1() 646 } 647 var res = make(chan uint64, 1) 648 649 select { 650 case ethash.fetchRateCh <- res: 651 case <-ethash.exitCh: 652 // Return local hashrate only if ethash is stopped. 653 return ethash.hashrate.Rate1() 654 } 655 656 // Gather total submitted hash rate of remote sealers. 657 return ethash.hashrate.Rate1() + float64(<-res) 658 } 659 660 // APIs implements consensus.Engine, returning the user facing RPC APIs. 661 func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API { 662 // In order to ensure backward compatibility, we exposes ethash RPC APIs 663 // to both eth and ethash namespaces. 664 return []rpc.API{ 665 { 666 Namespace: "eth", 667 Version: "1.0", 668 Service: &API{ethash}, 669 Public: true, 670 }, 671 { 672 Namespace: "ethash", 673 Version: "1.0", 674 Service: &API{ethash}, 675 Public: true, 676 }, 677 } 678 } 679 680 // SeedHash is the seed to use for generating a verification cache and the mining 681 // dataset. 682 func SeedHash(block uint64) []byte { 683 return seedHash(block) 684 }