// Copyright 2017 The go-simplechain Authors
// This file is part of the go-simplechain library.
//
// The go-simplechain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-simplechain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-simplechain library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/bigzoro/my_simplechain/consensus"
	"github.com/bigzoro/my_simplechain/log"
	"github.com/bigzoro/my_simplechain/metrics"
	"github.com/bigzoro/my_simplechain/rpc"
	mmap "github.com/edsrzf/mmap-go"
	"github.com/hashicorp/golang-lru/simplelru"
)

var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// two256 is a big integer representing 2^256
	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, nil}, nil, false)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}
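// As a usage sketch: memoryMap surfaces ErrInvalidDumpMagic when a dump was
// written under a different byte order or is otherwise corrupt, letting callers
// fall back to regeneration. verifyDumpFile is illustrative only and not part
// of the package API.
func verifyDumpFile(path string) error {
	file, mem, _, err := memoryMap(path)
	if err != nil {
		return err // ErrInvalidDumpMagic if the two header words don't match
	}
	mem.Unmap()
	file.Close()
	return nil
}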
// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path)
}

// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string
	new  func(epoch uint64) interface{}
	mu   sync.Mutex
	// Items are kept in an LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64
	futureItem interface{}
}

// newlru creates a new least-recently-used cache for either the verification
// caches or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted ethash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}
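// A hedged sketch of how this LRU is driven internally: items are created
// lazily per epoch, and get also pre-creates the next epoch's item as the
// 'future item'. The epoch numbers are arbitrary; exampleLRUUsage is
// illustrative only.
func exampleLRUUsage() {
	caches := newlru("cache", 3, newCache)

	current, future := caches.get(42)
	_ = current.(*cache) // item for epoch 42, always non-nil
	if future != nil {
		_ = future.(*cache) // pre-built item for epoch 43
	}
}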
// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}

// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}
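// A minimal lifecycle sketch under assumed inputs (epoch 7, the system temp
// directory): generate is guarded by sync.Once, so concurrent callers block
// until the first invocation finishes. exampleCacheLifecycle is illustrative
// only.
func exampleCacheLifecycle() {
	c := newCache(7).(*cache)
	c.generate(os.TempDir(), 3, false) // prune dumps more than 3 epochs old
	fmt.Println("cache words:", len(c.cache))
}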
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this dataset is relevant
	dump    *os.File  // File descriptor of the memory mapped dataset
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual dataset content (may be memory mapped)
	once    sync.Once // Ensures the dataset is generated only once
	done    uint32    // Atomic flag to determine generation status
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		// Mark the dataset generated after we're done. This is needed for remote
		// miners to check the generation status without blocking.
		defer atomic.StoreUint32(&d.done, 1)

		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return
		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// dataset becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4) // dsize is in bytes; each entry is a 4-byte uint32
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
	return atomic.LoadUint32(&d.done) == 1
}

// finalizer closes any open file handles and memory maps.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}
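// A sketch of the pattern remote mining code can follow: kick off DAG
// generation in the background and poll generated(), falling back to the much
// smaller verification cache until the DAG is ready. The arguments below are
// test-sized placeholders; exampleDatasetProbe is illustrative only.
func exampleDatasetProbe() {
	d := newDataset(1).(*dataset)
	go d.generate("", 0, true) // in-memory, test-sized generation
	if !d.generated() {
		fmt.Println("DAG still generating; verify with the cache instead of blocking")
	}
}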
// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false)
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false)
}

// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// Config holds the configuration parameters of the ethash engine.
type Config struct {
	CacheDir       string
	CachesInMem    int
	CachesOnDisk   int
	DatasetDir     string
	DatasetsInMem  int
	DatasetsOnDisk int
	PowMode        Mode

	Log log.Logger `toml:"-"`
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate
	remote   *remoteSealer

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once  // Ensures exit channel will not be closed twice.
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
	if config.Log == nil {
		config.Log = log.Root()
	}
	if config.CachesInMem <= 0 {
		config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	ethash := &Ethash{
		config:   config,
		caches:   newlru("cache", config.CachesInMem, newCache),
		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeterForced(),
	}
	ethash.remote = startRemoteSealer(ethash, notify, noverify)
	return ethash
}
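// A hedged construction sketch: the directories and counts below are
// illustrative defaults chosen for the example, not values mandated by the
// package. exampleNewEngine is not part of the package API.
func exampleNewEngine() *Ethash {
	cfg := Config{
		CacheDir:       filepath.Join(os.TempDir(), "ethash"),
		CachesInMem:    2,
		CachesOnDisk:   3,
		DatasetDir:     filepath.Join(os.TempDir(), "ethash-dags"),
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
		PowMode:        ModeNormal,
	}
	return New(cfg, nil, false) // no remote notifications, verification enabled
}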
// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
	ethash := &Ethash{
		config:   Config{PowMode: ModeTest, Log: log.Root()},
		caches:   newlru("cache", 1, newCache),
		datasets: newlru("dataset", 1, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeterForced(),
	}
	ethash.remote = startRemoteSealer(ethash, notify, noverify)
	return ethash
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
	}
}

// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeFail: fail,
	}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeDelay: delay,
	}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFullFake,
			Log:     log.Root(),
		},
	}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// Close closes the exit channel to notify all backend threads to exit.
func (ethash *Ethash) Close() error {
	var err error
	ethash.closeOnce.Do(func() {
		// Short circuit if the exit channel is not allocated.
		if ethash.remote == nil {
			return
		}
		close(ethash.remote.requestExit)
		<-ethash.remote.exitCh
	})
	return err
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
	epoch := block / epochLength
	currentI, futureI := ethash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation finish.
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
	}
	return current
}
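// For tests, the fake engines above let block processing proceed without real
// PoW. A minimal sketch (the surrounding test harness is assumed, not shown):
func exampleFakeEngines() {
	engine := NewFaker() // every seal is accepted
	defer engine.Close()

	delayed := NewFakeDelayer(50 * time.Millisecond) // verification sleeps first
	defer delayed.Close()
}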
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
//
// If async is specified, not only the future but also the current DAG is
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
	// Retrieve the requested ethash dataset
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// If async is specified, generate everything in a background thread
	if async && !current.generated() {
		go func() {
			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

			if futureI != nil {
				future := futureI.(*dataset)
				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
			}
		}()
	} else {
		// Either blocking generation was requested, or already done
		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

		if futureI != nil {
			future := futureI.(*dataset)
			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
		}
	}
	return current
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes the local hashrate, but also the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
	// Short circuit if we are not running ethash in normal/test mode.
	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
		return ethash.hashrate.Rate1()
	}
	var res = make(chan uint64, 1)

	select {
	case ethash.remote.fetchRateCh <- res:
	case <-ethash.remote.exitCh:
		// Return local hashrate only if ethash is stopped.
		return ethash.hashrate.Rate1()
	}

	// Gather the total submitted hash rate of remote sealers.
	return ethash.hashrate.Rate1() + float64(<-res)
}
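// Thread-count semantics in a short sketch: zero means one worker per core,
// a negative count idles the miner, and a positive count is used as-is.
// exampleThreadControl is illustrative only.
func exampleThreadControl(e *Ethash) {
	e.SetThreads(0)                    // use all cores of the machine
	e.SetThreads(-1)                   // allowed: the miner idles, no work done
	e.SetThreads(runtime.NumCPU() / 2) // explicit worker count
	fmt.Println("threads:", e.Threads())
}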
// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
	// In order to ensure backward compatibility, we expose ethash RPC APIs
	// to both eth and ethash namespaces.
	return []rpc.API{
		{
			Namespace: "eth",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
		{
			Namespace: "ethash",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
	}
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}
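// A small usage sketch: the seed changes once per epoch (every epochLength
// blocks), so any block number within an epoch maps to the same seed.
// exampleSeedHash is illustrative only.
func exampleSeedHash() {
	fmt.Printf("genesis seed: %x\n", SeedHash(0))
	fmt.Printf("epoch 1 seed: %x\n", SeedHash(epochLength))
	fmt.Printf("still epoch 1: %x\n", SeedHash(epochLength+1)) // same as above
}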