github.com/kisexp/xdchain@v0.0.0-20211206025815-490d6b732aa7/consensus/ethash/ethash.go (about)

     1  // Copyright 2017 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package ethash implements the ethash proof-of-work consensus engine.
    18  package ethash
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"math/rand"
    26  	"os"
    27  	"path/filepath"
    28  	"reflect"
    29  	"runtime"
    30  	"strconv"
    31  	"sync"
    32  	"sync/atomic"
    33  	"time"
    34  	"unsafe"
    35  
    36  	"github.com/edsrzf/mmap-go"
    37  	"github.com/kisexp/xdchain/consensus"
    38  	"github.com/kisexp/xdchain/log"
    39  	"github.com/kisexp/xdchain/metrics"
    40  	"github.com/kisexp/xdchain/rpc"
    41  	"github.com/hashicorp/golang-lru/simplelru"
    42  )
    43  
// ErrInvalidDumpMagic is returned by memoryMap when an on-disk cache/dataset
// file does not start with the expected dumpMagic sanity-check header.
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
    45  
    46  var (
    47  	// two256 is a big integer representing 2^256
    48  	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
    49  
    50  	// sharedEthash is a full instance that can be shared between multiple users.
    51  	sharedEthash = New(Config{"", 3, 0, false, "", 1, 0, false, ModeNormal, nil}, nil, false)
    52  
    53  	// algorithmRevision is the data structure version used for file naming.
    54  	algorithmRevision = 23
    55  
    56  	// dumpMagic is a dataset dump header to sanity check a data dump.
    57  	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
    58  )
    59  
    60  // isLittleEndian returns whether the local system is running in little or big
    61  // endian byte order.
    62  func isLittleEndian() bool {
    63  	n := uint32(0x01020304)
    64  	return *(*byte)(unsafe.Pointer(&n)) == 0x04
    65  }
    66  
    67  // memoryMap tries to memory map a file of uint32s for read only access.
    68  func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) {
    69  	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
    70  	if err != nil {
    71  		return nil, nil, nil, err
    72  	}
    73  	mem, buffer, err := memoryMapFile(file, false)
    74  	if err != nil {
    75  		file.Close()
    76  		return nil, nil, nil, err
    77  	}
    78  	for i, magic := range dumpMagic {
    79  		if buffer[i] != magic {
    80  			mem.Unmap()
    81  			file.Close()
    82  			return nil, nil, nil, ErrInvalidDumpMagic
    83  		}
    84  	}
    85  	if lock {
    86  		if err := mem.Lock(); err != nil {
    87  			mem.Unmap()
    88  			file.Close()
    89  			return nil, nil, nil, err
    90  		}
    91  	}
    92  	return file, mem, buffer[len(dumpMagic):], err
    93  }
    94  
// memoryMapFile tries to memory map an already opened file descriptor.
// It returns the raw byte mapping plus a []uint32 view onto the same memory;
// the uint32 slice is only valid for as long as the mmap.MMap stays mapped.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	// Length 0 maps the whole file.
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	// Reinterpret the []byte mapping as a []uint32 without copying: take the
	// slice header, divide len/cap by 4 (sizeof(uint32)) and cast it back.
	// This relies on the mapping being 4-byte aligned and a multiple of 4 long.
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
   113  
   114  // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
   115  // access, fill it with the data from a generator and then move it into the final
   116  // path requested.
   117  func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
   118  	// Ensure the data folder exists
   119  	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
   120  		return nil, nil, nil, err
   121  	}
   122  	// Create a huge temporary empty file to fill with data
   123  	temp := path + "." + strconv.Itoa(rand.Int())
   124  
   125  	dump, err := os.Create(temp)
   126  	if err != nil {
   127  		return nil, nil, nil, err
   128  	}
   129  	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
   130  		return nil, nil, nil, err
   131  	}
   132  	// Memory map the file for writing and fill it with the generator
   133  	mem, buffer, err := memoryMapFile(dump, true)
   134  	if err != nil {
   135  		dump.Close()
   136  		return nil, nil, nil, err
   137  	}
   138  	copy(buffer, dumpMagic)
   139  
   140  	data := buffer[len(dumpMagic):]
   141  	generator(data)
   142  
   143  	if err := mem.Unmap(); err != nil {
   144  		return nil, nil, nil, err
   145  	}
   146  	if err := dump.Close(); err != nil {
   147  		return nil, nil, nil, err
   148  	}
   149  	if err := os.Rename(temp, path); err != nil {
   150  		return nil, nil, nil, err
   151  	}
   152  	return memoryMap(path, lock)
   153  }
   154  
// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string                         // Human readable label ("cache"/"dataset") used in log messages
	new  func(epoch uint64) interface{} // Constructor invoked to build the item for a given epoch
	mu   sync.Mutex                     // Protects all fields below
	// Items are kept in a LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64      // Epoch the pre-built future item belongs to (0 when unset)
	futureItem interface{} // Item pre-built for the next epoch, promoted on demand
}
   166  
   167  // newlru create a new least-recently-used cache for either the verification caches
   168  // or the mining datasets.
   169  func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
   170  	if maxItems <= 0 {
   171  		maxItems = 1
   172  	}
   173  	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
   174  		log.Trace("Evicted ethash "+what, "epoch", key)
   175  	})
   176  	return &lru{what: what, new: new, cache: cache}
   177  }
   178  
   179  // get retrieves or creates an item for the given epoch. The first return value is always
   180  // non-nil. The second return value is non-nil if lru thinks that an item will be useful in
   181  // the near future.
   182  func (lru *lru) get(epoch uint64) (item, future interface{}) {
   183  	lru.mu.Lock()
   184  	defer lru.mu.Unlock()
   185  
   186  	// Get or create the item for the requested epoch.
   187  	item, ok := lru.cache.Get(epoch)
   188  	if !ok {
   189  		if lru.future > 0 && lru.future == epoch {
   190  			item = lru.futureItem
   191  		} else {
   192  			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
   193  			item = lru.new(epoch)
   194  		}
   195  		lru.cache.Add(epoch, item)
   196  	}
   197  	// Update the 'future item' if epoch is larger than previously seen.
   198  	if epoch < maxEpoch-1 && lru.future < epoch+1 {
   199  		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
   200  		future = lru.new(epoch + 1)
   201  		lru.future = epoch + 1
   202  		lru.futureItem = future
   203  	}
   204  	return item, future
   205  }
   206  
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
// When the cache is memory mapped from disk, generate registers (*cache).finalizer
// to unmap and close the dump file once the cache becomes unreachable.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache (nil if in-memory only)
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}
   215  
   216  // newCache creates a new ethash verification cache and returns it as a plain Go
   217  // interface to be usable in an LRU cache.
   218  func newCache(epoch uint64) interface{} {
   219  	return &cache{epoch: epoch}
   220  }
   221  
// generate ensures that the cache content is generated before use. It runs at
// most once per cache instance:
//   - dir:   directory for on-disk caches; "" keeps the cache in memory only
//   - limit: number of recent epochs whose cache files are retained on disk
//   - lock:  whether to mlock the memory mapped cache into RAM
//   - test:  shrink the cache to 1KB for fast unit tests
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			// size is in bytes; the cache elements are uint32s, hence size/4.
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			// Mapping failed: fall back to a plain in-memory cache.
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}
   273  
   274  // finalizer unmaps the memory and closes the file.
   275  func (c *cache) finalizer() {
   276  	if c.mmap != nil {
   277  		c.mmap.Unmap()
   278  		c.dump.Close()
   279  		c.mmap, c.dump = nil, nil
   280  	}
   281  }
   282  
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
// When the dataset is memory mapped from disk, generate registers
// (*dataset).finalizer to unmap and close the dump file on collection.
type dataset struct {
	epoch   uint64    // Epoch for which this cache is relevant
	dump    *os.File  // File descriptor of the memory mapped cache (nil if in-memory only)
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual cache data content
	once    sync.Once // Ensures the cache is generated only once
	done    uint32    // Atomic flag to determine generation status (set to 1 when generate finishes)
}
   292  
   293  // newDataset creates a new ethash mining dataset and returns it as a plain Go
   294  // interface to be usable in an LRU cache.
   295  func newDataset(epoch uint64) interface{} {
   296  	return &dataset{epoch: epoch}
   297  }
   298  
   299  // generate ensures that the dataset content is generated before use.
   300  func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
   301  	d.once.Do(func() {
   302  		// Mark the dataset generated after we're done. This is needed for remote
   303  		defer atomic.StoreUint32(&d.done, 1)
   304  
   305  		csize := cacheSize(d.epoch*epochLength + 1)
   306  		dsize := datasetSize(d.epoch*epochLength + 1)
   307  		seed := seedHash(d.epoch*epochLength + 1)
   308  		if test {
   309  			csize = 1024
   310  			dsize = 32 * 1024
   311  		}
   312  		// If we don't store anything on disk, generate and return
   313  		if dir == "" {
   314  			cache := make([]uint32, csize/4)
   315  			generateCache(cache, d.epoch, seed)
   316  
   317  			d.dataset = make([]uint32, dsize/4)
   318  			generateDataset(d.dataset, d.epoch, cache)
   319  
   320  			return
   321  		}
   322  		// Disk storage is needed, this will get fancy
   323  		var endian string
   324  		if !isLittleEndian() {
   325  			endian = ".be"
   326  		}
   327  		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
   328  		logger := log.New("epoch", d.epoch)
   329  
   330  		// We're about to mmap the file, ensure that the mapping is cleaned up when the
   331  		// cache becomes unused.
   332  		runtime.SetFinalizer(d, (*dataset).finalizer)
   333  
   334  		// Try to load the file from disk and memory map it
   335  		var err error
   336  		d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
   337  		if err == nil {
   338  			logger.Debug("Loaded old ethash dataset from disk")
   339  			return
   340  		}
   341  		logger.Debug("Failed to load old ethash dataset", "err", err)
   342  
   343  		// No previous dataset available, create a new dataset file to fill
   344  		cache := make([]uint32, csize/4)
   345  		generateCache(cache, d.epoch, seed)
   346  
   347  		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
   348  		if err != nil {
   349  			logger.Error("Failed to generate mapped ethash dataset", "err", err)
   350  
   351  			d.dataset = make([]uint32, dsize/2)
   352  			generateDataset(d.dataset, d.epoch, cache)
   353  		}
   354  		// Iterate over all previous instances and delete old ones
   355  		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
   356  			seed := seedHash(uint64(ep)*epochLength + 1)
   357  			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
   358  			os.Remove(path)
   359  		}
   360  	})
   361  }
   362  
   363  // generated returns whether this particular dataset finished generating already
   364  // or not (it may not have been started at all). This is useful for remote miners
   365  // to default to verification caches instead of blocking on DAG generations.
   366  func (d *dataset) generated() bool {
   367  	return atomic.LoadUint32(&d.done) == 1
   368  }
   369  
   370  // finalizer closes any file handlers and memory maps open.
   371  func (d *dataset) finalizer() {
   372  	if d.mmap != nil {
   373  		d.mmap.Unmap()
   374  		d.dump.Close()
   375  		d.mmap, d.dump = nil, nil
   376  	}
   377  }
   378  
   379  // MakeCache generates a new ethash cache and optionally stores it to disk.
   380  func MakeCache(block uint64, dir string) {
   381  	c := cache{epoch: block / epochLength}
   382  	c.generate(dir, math.MaxInt32, false, false)
   383  }
   384  
   385  // MakeDataset generates a new ethash dataset and optionally stores it to disk.
   386  func MakeDataset(block uint64, dir string) {
   387  	d := dataset{epoch: block / epochLength}
   388  	d.generate(dir, math.MaxInt32, false, false)
   389  }
   390  
// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal   Mode = iota // full verification with production cache/dataset sizes
	ModeShared               // delegate to the process-wide shared ethash instance
	ModeTest                 // shrunken sizes (1KB cache, 32KB dataset) for fast tests
	ModeFake                 // accept seals as valid without PoW (testing hook)
	ModeFullFake             // skip all consensus rule checks entirely
)
   401  
// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir         string // Directory for on-disk verification caches ("" = memory only)
	CachesInMem      int    // Number of recent verification caches kept in memory (min 1)
	CachesOnDisk     int    // Number of epochs' cache files retained on disk
	CachesLockMmap   bool   // Whether to mlock memory mapped caches into RAM
	DatasetDir       string // Directory for on-disk mining DAGs ("" = memory only)
	DatasetsInMem    int    // Number of recent mining datasets kept in memory
	DatasetsOnDisk   int    // Number of epochs' DAG files retained on disk
	DatasetsLockMmap bool   // Whether to mlock memory mapped datasets into RAM
	PowMode          Mode   // Type and amount of PoW verification performed

	// Log is the logger used by the engine; New falls back to log.Root() when nil.
	Log log.Logger `toml:"-"`
}
   416  
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate
	remote   *remoteSealer // Remote sealer serving external mining work (nil for fake/shared engines)

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once  // Ensures exit channel will not be closed twice.
}
   440  
   441  // New creates a full sized ethash PoW scheme and starts a background thread for
   442  // remote mining, also optionally notifying a batch of remote services of new work
   443  // packages.
   444  func New(config Config, notify []string, noverify bool) *Ethash {
   445  	if config.Log == nil {
   446  		config.Log = log.Root()
   447  	}
   448  	if config.CachesInMem <= 0 {
   449  		config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
   450  		config.CachesInMem = 1
   451  	}
   452  	if config.CacheDir != "" && config.CachesOnDisk > 0 {
   453  		config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
   454  	}
   455  	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
   456  		config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
   457  	}
   458  	ethash := &Ethash{
   459  		config:   config,
   460  		caches:   newlru("cache", config.CachesInMem, newCache),
   461  		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
   462  		update:   make(chan struct{}),
   463  		hashrate: metrics.NewMeterForced(),
   464  	}
   465  	ethash.remote = startRemoteSealer(ethash, notify, noverify)
   466  	return ethash
   467  }
   468  
   469  // NewTester creates a small sized ethash PoW scheme useful only for testing
   470  // purposes.
   471  func NewTester(notify []string, noverify bool) *Ethash {
   472  	ethash := &Ethash{
   473  		config:   Config{PowMode: ModeTest, Log: log.Root()},
   474  		caches:   newlru("cache", 1, newCache),
   475  		datasets: newlru("dataset", 1, newDataset),
   476  		update:   make(chan struct{}),
   477  		hashrate: metrics.NewMeterForced(),
   478  	}
   479  	ethash.remote = startRemoteSealer(ethash, notify, noverify)
   480  	return ethash
   481  }
   482  
   483  // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
   484  // all blocks' seal as valid, though they still have to conform to the Ethereum
   485  // consensus rules.
   486  func NewFaker() *Ethash {
   487  	return &Ethash{
   488  		config: Config{
   489  			PowMode: ModeFake,
   490  			Log:     log.Root(),
   491  		},
   492  	}
   493  }
   494  
   495  // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
   496  // accepts all blocks as valid apart from the single one specified, though they
   497  // still have to conform to the Ethereum consensus rules.
   498  func NewFakeFailer(fail uint64) *Ethash {
   499  	return &Ethash{
   500  		config: Config{
   501  			PowMode: ModeFake,
   502  			Log:     log.Root(),
   503  		},
   504  		fakeFail: fail,
   505  	}
   506  }
   507  
   508  // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
   509  // accepts all blocks as valid, but delays verifications by some time, though
   510  // they still have to conform to the Ethereum consensus rules.
   511  func NewFakeDelayer(delay time.Duration) *Ethash {
   512  	return &Ethash{
   513  		config: Config{
   514  			PowMode: ModeFake,
   515  			Log:     log.Root(),
   516  		},
   517  		fakeDelay: delay,
   518  	}
   519  }
   520  
   521  // NewFullFaker creates an ethash consensus engine with a full fake scheme that
   522  // accepts all blocks as valid, without checking any consensus rules whatsoever.
   523  func NewFullFaker() *Ethash {
   524  	return &Ethash{
   525  		config: Config{
   526  			PowMode: ModeFullFake,
   527  			Log:     log.Root(),
   528  		},
   529  	}
   530  }
   531  
   532  // NewShared creates a full sized ethash PoW shared between all requesters running
   533  // in the same process.
   534  func NewShared() *Ethash {
   535  	return &Ethash{shared: sharedEthash}
   536  }
   537  
   538  // Close closes the exit channel to notify all backend threads exiting.
   539  func (ethash *Ethash) Close() error {
   540  	var err error
   541  	ethash.closeOnce.Do(func() {
   542  		// Short circuit if the exit channel is not allocated.
   543  		if ethash.remote == nil {
   544  			return
   545  		}
   546  		close(ethash.remote.requestExit)
   547  		<-ethash.remote.exitCh
   548  	})
   549  	return err
   550  }
   551  
   552  // cache tries to retrieve a verification cache for the specified block number
   553  // by first checking against a list of in-memory caches, then against caches
   554  // stored on disk, and finally generating one if none can be found.
   555  func (ethash *Ethash) cache(block uint64) *cache {
   556  	epoch := block / epochLength
   557  	currentI, futureI := ethash.caches.get(epoch)
   558  	current := currentI.(*cache)
   559  
   560  	// Wait for generation finish.
   561  	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
   562  
   563  	// If we need a new future cache, now's a good time to regenerate it.
   564  	if futureI != nil {
   565  		future := futureI.(*cache)
   566  		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
   567  	}
   568  	return current
   569  }
   570  
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
//
// If async is specified, not only the future but the current DAG is also
// generated on a background thread. In that case the returned dataset may not
// be ready yet; callers should consult dataset.generated() before using it.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
	// Retrieve the requested ethash dataset
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// If async is specified, generate everything in a background thread
	if async && !current.generated() {
		go func() {
			// Current epoch first, then the pre-fetch of the next epoch.
			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

			if futureI != nil {
				future := futureI.(*dataset)
				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
			}
		}()
	} else {
		// Either blocking generation was requested, or already done
		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

		if futureI != nil {
			future := futureI.(*dataset)
			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
		}
	}
	return current
}
   604  
   605  // Threads returns the number of mining threads currently enabled. This doesn't
   606  // necessarily mean that mining is running!
   607  func (ethash *Ethash) Threads() int {
   608  	ethash.lock.Lock()
   609  	defer ethash.lock.Unlock()
   610  
   611  	return ethash.threads
   612  }
   613  
   614  // SetThreads updates the number of mining threads currently enabled. Calling
   615  // this method does not start mining, only sets the thread count. If zero is
   616  // specified, the miner will use all cores of the machine. Setting a thread
   617  // count below zero is allowed and will cause the miner to idle, without any
   618  // work being done.
   619  func (ethash *Ethash) SetThreads(threads int) {
   620  	ethash.lock.Lock()
   621  	defer ethash.lock.Unlock()
   622  
   623  	// If we're running a shared PoW, set the thread count on that instead
   624  	if ethash.shared != nil {
   625  		ethash.shared.SetThreads(threads)
   626  		return
   627  	}
   628  	// Update the threads and ping any running seal to pull in any changes
   629  	ethash.threads = threads
   630  	select {
   631  	case ethash.update <- struct{}{}:
   632  	default:
   633  	}
   634  }
   635  
   636  // Hashrate implements PoW, returning the measured rate of the search invocations
   637  // per second over the last minute.
   638  // Note the returned hashrate includes local hashrate, but also includes the total
   639  // hashrate of all remote miner.
   640  func (ethash *Ethash) Hashrate() float64 {
   641  	if ethash.hashrate == nil {
   642  		return 0
   643  	}
   644  	// Short circuit if we are run the ethash in normal/test mode.
   645  	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
   646  		return ethash.hashrate.Rate1()
   647  	}
   648  	var res = make(chan uint64, 1)
   649  
   650  	select {
   651  	case ethash.remote.fetchRateCh <- res:
   652  	case <-ethash.remote.exitCh:
   653  		// Return local hashrate only if ethash is stopped.
   654  		return ethash.hashrate.Rate1()
   655  	}
   656  
   657  	// Gather total submitted hash rate of remote sealers.
   658  	return ethash.hashrate.Rate1() + float64(<-res)
   659  }
   660  
   661  // APIs implements consensus.Engine, returning the user facing RPC APIs.
   662  func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
   663  	// In order to ensure backward compatibility, we exposes ethash RPC APIs
   664  	// to both eth and ethash namespaces.
   665  	return []rpc.API{
   666  		{
   667  			Namespace: "eth",
   668  			Version:   "1.0",
   669  			Service:   &API{ethash},
   670  			Public:    true,
   671  		},
   672  		{
   673  			Namespace: "ethash",
   674  			Version:   "1.0",
   675  			Service:   &API{ethash},
   676  			Public:    true,
   677  		},
   678  	}
   679  }
   680  
// SeedHash is the seed to use for generating a verification cache and the mining
// dataset. It is the exported wrapper around the internal seedHash helper.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}
   686  
// Protocol implements consensus.Engine.Protocol, returning the eth wire
// protocol descriptor used by this engine.
func (ethash *Ethash) Protocol() consensus.Protocol {
	return consensus.EthProtocol
}