github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/consensus/ethash/algorithm.go

// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

package ethash

import (
	"encoding/binary"
	"hash"
	"reflect"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/Sberex/go-sberex/common"
	"github.com/Sberex/go-sberex/common/bitutil"
	"github.com/Sberex/go-sberex/crypto"
	"github.com/Sberex/go-sberex/crypto/sha3"
	"github.com/Sberex/go-sberex/log"
)

const (
	datasetInitBytes   = 1 << 29 // Bytes in dataset at genesis
	datasetGrowthBytes = 0       // Dataset growth per epoch
	cacheInitBytes     = 1 << 23 // Bytes in cache at genesis
	cacheGrowthBytes   = 0       // Cache growth per epoch
	epochLength        = 90000   // Blocks per epoch
	mixBytes           = 128     // Width of mix
	hashBytes          = 64      // Hash length in bytes
	hashWords          = 16      // Number of 32 bit ints in a hash
	datasetParents     = 256     // Number of parents of each dataset element
	cacheRounds        = 3       // Number of rounds in cache production
	loopAccesses       = 64      // Number of accesses in hashimoto loop
)
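
// Note on the constants above: with datasetGrowthBytes and cacheGrowthBytes
// both zero, the cache and dataset do not grow across epochs, unlike upstream
// ethash. The genesis sizes work out as follows (an illustrative check, not
// part of the original file):
//
//	cacheInitBytes   = 1 << 23 // 8 MB   -> 8388608/hashBytes   = 131072 hashes
//	datasetInitBytes = 1 << 29 // 512 MB -> 536870912/mixBytes  = 4194304 rows
//
// The actual sizes used come from the cacheSizes/datasetSizes tables at the
// bottom of this file, which are slightly smaller than these powers of two.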

// hasher is a repetitive hasher allowing the same hash data structures to be
// reused between hash runs instead of requiring new ones to be created.
type hasher func(dest []byte, data []byte)

// makeHasher creates a repetitive hasher, allowing the same hash data structures
// to be reused between hash runs instead of requiring new ones to be created.
// The returned function is not thread safe!
func makeHasher(h hash.Hash) hasher {
	return func(dest []byte, data []byte) {
		h.Write(data)
		h.Sum(dest[:0])
		h.Reset()
	}
}
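
// Illustrative usage of makeHasher (a sketch, not part of the original file):
// the closure reuses a single sha3 state between calls, avoiding the creation
// of a new hasher per hash, which matters in the tight generation loops below.
// Because that state is shared, each goroutine must create its own hasher.
//
//	keccak512 := makeHasher(sha3.NewKeccak512())
//	out := make([]byte, 64)
//	keccak512(out, []byte("some input")) // out now holds the 64-byte digest
//	keccak512(out, out)                  // safe to call repeatedly; state is reset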

// seedHash is the seed to use for generating a verification cache and the mining
// dataset.
func seedHash(block uint64) []byte {
	seed := make([]byte, 32)
	if block < epochLength {
		return seed
	}
	keccak256 := makeHasher(sha3.NewKeccak256())
	for i := 0; i < int(block/epochLength); i++ {
		keccak256(seed, seed)
	}
	return seed
}
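
// For illustration (not part of the original file): with epochLength = 90000,
// blocks 0-89999 share the all-zero seed, and each later epoch hashes the
// seed once more:
//
//	seedHash(0)      // 32 zero bytes
//	seedHash(90000)  // keccak256 applied once to the zero seed
//	seedHash(180000) // keccak256 applied twice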

// generateCache creates a verification cache of a given size for an input seed.
// The cache production process involves first sequentially filling up 8 MB of
// memory, then performing cacheRounds (three) passes of Sergio Demian Lerner's
// RandMemoHash algorithm from Strict Memory Hard Hashing Functions (2014).
// The output is a set of 131071 64-byte values.
// This method places the result into dest in machine byte order.
func generateCache(dest []uint32, epoch uint64, seed []byte) {
	// Print some debug logs to allow analysis on low end devices
	logger := log.New("epoch", epoch)

	start := time.Now()
	defer func() {
		elapsed := time.Since(start)

		logFn := logger.Debug
		if elapsed > 3*time.Second {
			logFn = logger.Info
		}
		logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
	}()
	// Convert our destination slice to a byte buffer
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
	header.Len *= 4
	header.Cap *= 4
	cache := *(*[]byte)(unsafe.Pointer(&header))

	// Calculate the number of theoretical rows (we'll store in one buffer nonetheless)
	size := uint64(len(cache))
	rows := int(size) / hashBytes

	// Start a monitoring goroutine to report progress on low end devices
	var progress uint32

	done := make(chan struct{})
	defer close(done)

	go func() {
		for {
			select {
			case <-done:
				return
			case <-time.After(3 * time.Second):
				logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start)))
			}
		}
	}()
	// Create a hasher to reuse between invocations
	keccak512 := makeHasher(sha3.NewKeccak512())

	// Sequentially produce the initial dataset
	keccak512(cache, seed)
	for offset := uint64(hashBytes); offset < size; offset += hashBytes {
		keccak512(cache[offset:], cache[offset-hashBytes:offset])
		atomic.AddUint32(&progress, 1)
	}
	// Use a low-round version of randmemohash
	temp := make([]byte, hashBytes)

	for i := 0; i < cacheRounds; i++ {
		for j := 0; j < rows; j++ {
			var (
				srcOff = ((j - 1 + rows) % rows) * hashBytes
				dstOff = j * hashBytes
				xorOff = (binary.LittleEndian.Uint32(cache[dstOff:]) % uint32(rows)) * hashBytes
			)
			bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
			keccak512(cache[dstOff:], temp)

			atomic.AddUint32(&progress, 1)
		}
	}
	// Swap the byte order on big endian systems and return
	if !isLittleEndian() {
		swap(cache)
	}
}
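
// A minimal sketch of producing the epoch-0 verification cache (illustrative,
// not part of the original file); cacheSizes[0] is defined at the bottom of
// this file and is a multiple of hashBytes:
//
//	size := cacheSizes[0]                // 8388544 bytes
//	cache := make([]uint32, size/4)      // generateCache fills dest in place
//	generateCache(cache, 0, seedHash(0)) // epoch-0 seed is all zeros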

// swap changes the byte order of the buffer assuming a uint32 representation.
func swap(buffer []byte) {
	for i := 0; i < len(buffer); i += 4 {
		binary.BigEndian.PutUint32(buffer[i:], binary.LittleEndian.Uint32(buffer[i:]))
	}
}

// prepare converts an ethash cache or dataset from a byte stream into the internal
// int representation. All ethash methods work with ints to avoid constant byte to
// int conversions as well as to handle both little and big endian systems.
func prepare(dest []uint32, src []byte) {
	for i := 0; i < len(dest); i++ {
		dest[i] = binary.LittleEndian.Uint32(src[i*4:])
	}
}

// fnv is an algorithm inspired by the FNV hash, which in some cases is used as
// a non-associative substitute for XOR. Note that we multiply the prime with
// the full 32-bit input, in contrast with the FNV-1 spec which multiplies the
// prime with one byte (octet) in turn.
func fnv(a, b uint32) uint32 {
	return a*0x01000193 ^ b
}

// fnvHash mixes in data into mix using the ethash fnv method.
func fnvHash(mix []uint32, data []uint32) {
	for i := 0; i < len(mix); i++ {
		mix[i] = mix[i]*0x01000193 ^ data[i]
	}
}
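
// A worked example of fnv (illustrative, not part of the original file):
// unlike XOR, the operation is not symmetric in its arguments, which is the
// mixing property the loops above and below rely on. 0x01000193 is the 32-bit
// FNV prime.
//
//	fnv(1, 2) // 1*0x01000193 ^ 2 = 0x01000191
//	fnv(2, 1) // 2*0x01000193 ^ 1 = 0x02000327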

// generateDatasetItem combines data from 256 pseudorandomly selected cache nodes,
// and hashes that to compute a single dataset node.
func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte {
	// Calculate the number of theoretical rows (we use one buffer nonetheless)
	rows := uint32(len(cache) / hashWords)

	// Initialize the mix
	mix := make([]byte, hashBytes)

	binary.LittleEndian.PutUint32(mix, cache[(index%rows)*hashWords]^index)
	for i := 1; i < hashWords; i++ {
		binary.LittleEndian.PutUint32(mix[i*4:], cache[(index%rows)*hashWords+uint32(i)])
	}
	keccak512(mix, mix)

	// Convert the mix to uint32s to avoid constant bit shifting
	intMix := make([]uint32, hashWords)
	for i := 0; i < len(intMix); i++ {
		intMix[i] = binary.LittleEndian.Uint32(mix[i*4:])
	}
	// fnv it with a lot of random cache nodes based on index
	for i := uint32(0); i < datasetParents; i++ {
		parent := fnv(index^i, intMix[i%16]) % rows
		fnvHash(intMix, cache[parent*hashWords:])
	}
	// Flatten the uint32 mix into a binary one and return
	for i, val := range intMix {
		binary.LittleEndian.PutUint32(mix[i*4:], val)
	}
	keccak512(mix, mix)
	return mix
}
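
// The item derivation above, summarized (illustrative, not part of the
// original file): seed a 64-byte mix from cache row index%rows (with the
// first word XORed with index), keccak512 it, fold in datasetParents (256)
// pseudorandomly chosen cache rows via fnv, then keccak512 the result again.
// Light verification recomputes items on demand this way, while full mining
// materializes all of them once via generateDataset below.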

// generateDataset generates the entire ethash dataset for mining.
// This method places the result into dest in machine byte order.
func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
	// Print some debug logs to allow analysis on low end devices
	logger := log.New("epoch", epoch)

	start := time.Now()
	defer func() {
		elapsed := time.Since(start)

		logFn := logger.Debug
		if elapsed > 3*time.Second {
			logFn = logger.Info
		}
		logFn("Generated ethash dataset", "elapsed", common.PrettyDuration(elapsed))
	}()

	// Figure out whether the bytes need to be swapped for the machine
	swapped := !isLittleEndian()

	// Convert our destination slice to a byte buffer
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
	header.Len *= 4
	header.Cap *= 4
	dataset := *(*[]byte)(unsafe.Pointer(&header))

	// Generate the dataset on many goroutines since it takes a while
	threads := runtime.NumCPU()
	size := uint64(len(dataset))

	var pend sync.WaitGroup
	pend.Add(threads)

	var progress uint32
	for i := 0; i < threads; i++ {
		go func(id int) {
			defer pend.Done()

			// Create a hasher to reuse between invocations
			keccak512 := makeHasher(sha3.NewKeccak512())

			// Calculate the data segment this thread should generate
			batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
			first := uint32(id) * batch
			limit := first + batch
			if limit > uint32(size/hashBytes) {
				limit = uint32(size / hashBytes)
			}
			// Calculate the dataset segment
			percent := uint32(size / hashBytes / 100)
			for index := first; index < limit; index++ {
				item := generateDatasetItem(cache, index, keccak512)
				if swapped {
					swap(item)
				}
				copy(dataset[index*hashBytes:], item)

				if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
					logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
				}
			}
		}(i)
	}
	// Wait for all the generators to finish and return
	pend.Wait()
}
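
// How the per-thread split works, with concrete numbers (illustrative, not
// part of the original file): for the epoch-0 dataset of 536870528 bytes and,
// say, 8 threads, there are size/hashBytes = 8388602 items, so each thread
// gets batch = ceil(8388602/8) = 1048576 items, with the last thread's range
// clamped to the true item count:
//
//	batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
//	first := uint32(id) * batch
//	limit := first + batch // clamped to size/hashBytes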

// hashimoto aggregates data from the full dataset in order to produce our final
// value for a particular header hash and nonce.
func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32) []uint32) ([]byte, []byte) {
	// Calculate the number of theoretical rows (we use one buffer nonetheless)
	rows := uint32(size / mixBytes)

	// Combine header+nonce into a 40 byte seed
	seed := make([]byte, 40)
	copy(seed, hash)
	binary.LittleEndian.PutUint64(seed[32:], nonce)

	seed = crypto.Keccak512(seed)
	seedHead := binary.LittleEndian.Uint32(seed)

	// Start the mix with replicated seed
	mix := make([]uint32, mixBytes/4)
	for i := 0; i < len(mix); i++ {
		mix[i] = binary.LittleEndian.Uint32(seed[i%16*4:])
	}
	// Mix in random dataset nodes
	temp := make([]uint32, len(mix))

	for i := 0; i < loopAccesses; i++ {
		parent := fnv(uint32(i)^seedHead, mix[i%len(mix)]) % rows
		for j := uint32(0); j < mixBytes/hashBytes; j++ {
			copy(temp[j*hashWords:], lookup(2*parent+j))
		}
		fnvHash(mix, temp)
	}
	// Compress mix
	for i := 0; i < len(mix); i += 4 {
		mix[i/4] = fnv(fnv(fnv(mix[i], mix[i+1]), mix[i+2]), mix[i+3])
	}
	mix = mix[:len(mix)/4]

	digest := make([]byte, common.HashLength)
	for i, val := range mix {
		binary.LittleEndian.PutUint32(digest[i*4:], val)
	}
	return digest, crypto.Keccak256(append(seed, digest...))
}
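
// The access pattern above, spelled out (illustrative, not part of the
// original file): each of the loopAccesses (64) iterations picks a 128-byte
// row, fetched as two consecutive 64-byte dataset items at indices 2*parent
// and 2*parent+1, and folds it into the 128-byte mix with fnv. The mix is
// then compressed 4:1 into a 32-byte digest, and the final PoW value is
// keccak256(seed ++ digest), where seed is the 64-byte keccak512 output.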

// hashimotoLight aggregates data from the full dataset (using only a small
// in-memory cache) in order to produce our final value for a particular header
// hash and nonce.
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
	keccak512 := makeHasher(sha3.NewKeccak512())

	lookup := func(index uint32) []uint32 {
		rawData := generateDatasetItem(cache, index, keccak512)

		data := make([]uint32, len(rawData)/4)
		for i := 0; i < len(data); i++ {
			data[i] = binary.LittleEndian.Uint32(rawData[i*4:])
		}
		return data
	}
	return hashimoto(hash, nonce, size, lookup)
}
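
// A minimal verification sketch (illustrative, not part of the original
// file): given an epoch-0 cache prepared as shown after generateCache, a
// block can be checked without the full dataset; headerHash (a 32-byte hash)
// and nonce are hypothetical inputs taken from the block header:
//
//	digest, result := hashimotoLight(datasetSizes[0], cache, headerHash, nonce)
//	// digest must match the header's mix digest, and result is compared
//	// against the difficulty target.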

// hashimotoFull aggregates data from the full dataset (using the full in-memory
// dataset) in order to produce our final value for a particular header hash and
// nonce.
func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
	lookup := func(index uint32) []uint32 {
		offset := index * hashWords
		return dataset[offset : offset+hashWords]
	}
	return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
}
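
// Note the symmetry (illustrative, not part of the original file): the only
// difference between the light and full paths is the lookup closure. Full
// mining slices items straight out of the precomputed dataset, while light
// verification recomputes each item from the cache, paying datasetParents
// (256) fnv mixing rounds plus two keccak512 invocations per lookup in
// exchange for not holding the ~512 MB dataset in memory.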

const maxEpoch = 1

// datasetSizes is a lookup table for the ethash dataset size for the first
// epoch.
var datasetSizes = [maxEpoch]uint64{536870528}

// cacheSizes is a lookup table for the ethash verification cache size for the
// first epoch.
var cacheSizes = [maxEpoch]uint64{8388544}
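
// Why these exact values (an assumption based on the upstream ethash sizing
// rule, not stated in this file): each appears to be the largest multiple of
// the access width just under a power of two whose row count is prime, which
// avoids short cycles in the pseudorandom accesses above:
//
//	8388544/hashBytes  = 131071  (prime, 2^17 - 1)
//	536870528/mixBytes = 4194301 (prime, 2^22 - 3)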