github.com/devfans/go-ethereum@v1.5.10-0.20170326212234-7419d0c38291/pow/ethash.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pow

import (
	"bytes"
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"sync"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/log"
	metrics "github.com/rcrowley/go-metrics"
)

var (
	ErrInvalidDumpMagic  = errors.New("invalid dump magic")
	ErrNonceOutOfRange   = errors.New("nonce out of range")
	ErrInvalidDifficulty = errors.New("non-positive difficulty")
	ErrInvalidMixDigest  = errors.New("invalid mix digest")
	ErrInvalidPoW        = errors.New("pow difficulty invalid")
)

var (
	// maxUint256 is a big integer representing 2^256, used to derive PoW targets.
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = NewFullEthash("", 3, 0, "", 1, 0)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}
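// probeDump is an illustrative addition, not part of the original file: a
// minimal sketch of how a caller might check an on-disk dump against the
// dumpMagic header and distinguish a corrupted file from a missing one. The
// helper name is an assumption; the path is expected to point at a dump
// produced by memoryMapAndGenerate below.
func probeDump(path string) error {
	file, mem, _, err := memoryMap(path)
	switch err {
	case nil:
		// Valid dump: release the mapping and the file handle again.
		mem.Unmap()
		file.Close()
		return nil
	case ErrInvalidDumpMagic:
		// The file exists but does not start with dumpMagic; treat it as
		// corrupted and remove it so it can be regenerated from scratch.
		return os.Remove(path)
	default:
		return err
	}
}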
// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Flush(); err != nil {
		mem.Unmap()
		dump.Close()
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		mem.Unmap()
		dump.Close()
		return nil, nil, nil, err
	}
	return dump, mem, data, nil
}

// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64 // Epoch for which this cache is relevant

	dump *os.File  // File descriptor of the memory mapped cache
	mmap mmap.MMap // Memory map itself to unmap before releasing

	cache []uint32   // The actual cache data content (may be memory mapped)
	used  time.Time  // Timestamp of the last use for smarter eviction
	once  sync.Once  // Ensures the cache is generated only once
	lock  sync.Mutex // Ensures thread safety for updating the usage time
}
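// exampleGenerateDump is an illustrative addition, not part of the original
// file: a minimal sketch of driving memoryMapAndGenerate with a trivial
// generator. The dump path and payload are assumptions for the example; real
// callers pass generateCache or generateDataset as the generator.
func exampleGenerateDump(dir string) error {
	path := filepath.Join(dir, "example-dump")

	// Fill a 1 KiB dump (256 uint32s) with a counter pattern. The size is in
	// bytes and excludes the dumpMagic prefix, matching memoryMapAndGenerate.
	dump, mem, _, err := memoryMapAndGenerate(path, 1024, func(buffer []uint32) {
		for i := range buffer {
			buffer[i] = uint32(i)
		}
	})
	if err != nil {
		return err
	}
	mem.Unmap()
	return dump.Close()
}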
// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		// If we have a testing cache, generate and return
		if test {
			c.cache = make([]uint32, 1024/4)
			generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
			return
		}
		// If we don't store anything on disk, generate and return
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)

		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// release closes any file handlers and memory maps open.
func (c *cache) release() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.mmap = nil
	}
	if c.dump != nil {
		c.dump.Close()
		c.dump = nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch uint64 // Epoch for which this dataset is relevant

	dump *os.File  // File descriptor of the memory mapped dataset
	mmap mmap.MMap // Memory map itself to unmap before releasing

	dataset []uint32   // The actual dataset content (may be memory mapped)
	used    time.Time  // Timestamp of the last use for smarter eviction
	once    sync.Once  // Ensures the dataset is generated only once
	lock    sync.Mutex // Ensures thread safety for updating the usage time
}
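// exampleCachePath is an illustrative addition, not part of the original
// file: a minimal sketch of the file naming convention cache.generate uses,
// so external tooling can locate the dump for a given epoch. The helper name
// is an assumption.
func exampleCachePath(dir string, epoch uint64) string {
	// Dumps are keyed by the algorithm revision and the first 8 bytes of the
	// epoch seed; big endian hosts get an extra ".be" suffix.
	seed := seedHash(epoch*epochLength + 1)

	var endian string
	if !isLittleEndian() {
		endian = ".be"
	}
	return filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
}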
// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		// If we have a testing dataset, generate and return
		if test {
			cache := make([]uint32, 1024/4)
			generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))

			d.dataset = make([]uint32, 32*1024/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// If we don't store anything on disk, generate and return
		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)

		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// release closes any file handlers and memory maps open.
func (d *dataset) release() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.mmap = nil
	}
	if d.dump != nil {
		d.dump.Close()
		d.dump = nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block/epochLength + 1}
	c.generate(dir, math.MaxInt32, false)
	c.release()
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block/epochLength + 1}
	d.generate(dir, math.MaxInt32, false)
	d.release()
}
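// examplePregenerate is an illustrative addition, not part of the original
// file: a minimal sketch of pre-generating the verification cache and mining
// dataset for a block, in the spirit of a makedag-style tool. The target
// directory is an assumption picked for the example.
func examplePregenerate(block uint64) {
	// Both helpers block until generation completes and then release their
	// file handles, leaving the dumps on disk for later memory mapping.
	MakeCache(block, "/tmp/ethash")
	MakeDataset(block, "/tmp/ethash")
}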
// Ethash is a PoW data structure implementing the ethash algorithm.
type Ethash struct {
	cachedir     string // Data directory to store the verification caches
	cachesinmem  int    // Number of caches to keep in memory
	cachesondisk int    // Number of caches to keep on disk
	dagdir       string // Data directory to store full mining datasets
	dagsinmem    int    // Number of mining datasets to keep in memory
	dagsondisk   int    // Number of mining datasets to keep on disk

	caches   map[uint64]*cache   // In memory caches to avoid regenerating too often
	fcache   *cache              // Pre-generated cache for the estimated future epoch
	datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
	fdataset *dataset            // Pre-generated dataset for the estimated future epoch
	lock     sync.Mutex          // Ensures thread safety for the in-memory caches

	hashrate metrics.Meter // Meter tracking the average hashrate

	tester bool // Flag whether to use a smaller test dataset
}

// NewFullEthash creates a full sized ethash PoW scheme.
func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) PoW {
	if cachesinmem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
		cachesinmem = 1
	}
	if cachedir != "" && cachesondisk > 0 {
		log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
	}
	if dagdir != "" && dagsondisk > 0 {
		log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
	}
	return &Ethash{
		cachedir:     cachedir,
		cachesinmem:  cachesinmem,
		cachesondisk: cachesondisk,
		dagdir:       dagdir,
		dagsinmem:    dagsinmem,
		dagsondisk:   dagsondisk,
		caches:       make(map[uint64]*cache),
		datasets:     make(map[uint64]*dataset),
		hashrate:     metrics.NewMeter(),
	}
}

// NewTestEthash creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTestEthash() PoW {
	return &Ethash{
		cachesinmem: 1,
		caches:      make(map[uint64]*cache),
		datasets:    make(map[uint64]*dataset),
		tester:      true,
		hashrate:    metrics.NewMeter(),
	}
}

// NewSharedEthash creates a full sized ethash PoW shared between all requesters
// running in the same process.
func NewSharedEthash() PoW {
	return sharedEthash
}
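// exampleNewEthash is an illustrative addition, not part of the original
// file: a minimal sketch of wiring up a full ethash instance with disk-backed
// caches and DAGs. The directories and retention counts are assumptions
// picked for the example.
func exampleNewEthash() PoW {
	// Keep 3 verification caches in memory and 1 on disk, plus 1 mining
	// dataset in memory and 2 on disk, mirroring the constructor's knobs.
	return NewFullEthash("/tmp/ethash/cache", 3, 1, "/tmp/ethash/dag", 1, 2)
}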
// Verify implements PoW, checking whether the given block satisfies the PoW
// difficulty requirements.
func (ethash *Ethash) Verify(block Block) error {
	// Sanity check that the block number is below the lookup table size (60M blocks)
	number := block.NumberU64()
	if number/epochLength >= uint64(len(cacheSizes)) {
		// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
		return ErrNonceOutOfRange
	}
	// Ensure that we have a valid difficulty for the block
	difficulty := block.Difficulty()
	if difficulty.Sign() <= 0 {
		return ErrInvalidDifficulty
	}
	// Recompute the digest and PoW value and verify against the block
	cache := ethash.cache(number)

	size := datasetSize(number)
	if ethash.tester {
		size = 32 * 1024
	}
	digest, result := hashimotoLight(size, cache, block.HashNoNonce().Bytes(), block.Nonce())
	if !bytes.Equal(block.MixDigest().Bytes(), digest) {
		return ErrInvalidMixDigest
	}
	target := new(big.Int).Div(maxUint256, difficulty)
	if new(big.Int).SetBytes(result).Cmp(target) > 0 {
		return ErrInvalidPoW
	}
	return nil
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.caches[epoch], (*cache)(nil)
	if current == nil {
		// No in-memory cache, evict the oldest if the cache limit was reached
		for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
			var evict *cache
			for _, cache := range ethash.caches {
				if evict == nil || evict.used.After(cache.used) {
					evict = cache
				}
			}
			delete(ethash.caches, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new cache pre-generated, use that, otherwise create a new one
		if ethash.fcache != nil && ethash.fcache.epoch == epoch {
			log.Trace("Using pre-generated cache", "epoch", epoch)
			current, ethash.fcache = ethash.fcache, nil
		} else {
			log.Trace("Requiring new ethash cache", "epoch", epoch)
			current = &cache{epoch: epoch}
		}
		ethash.caches[epoch] = current

		// If we just used up the future cache, or need a refresh, regenerate
		if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
			if ethash.fcache != nil {
				ethash.fcache.release()
			}
			log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
			future = &cache{epoch: epoch + 1}
			ethash.fcache = future
		}
	}
	current.used = time.Now()
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the cache
	current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future cache, now's a good time to regenerate it
	if future != nil {
		go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
	}
	return current.cache
}
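// exampleMeetsTarget is an illustrative addition, not part of the original
// file: a minimal sketch of the difficulty-to-target relation that Verify and
// Search both rely on. A PoW result is valid when, interpreted as a 256-bit
// big-endian integer, it is at most 2^256 / difficulty.
func exampleMeetsTarget(result []byte, difficulty *big.Int) bool {
	target := new(big.Int).Div(maxUint256, difficulty)
	return new(big.Int).SetBytes(result).Cmp(target) <= 0
}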
// Search implements PoW, attempting to find a nonce that satisfies the block's
// difficulty requirements.
func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
	var (
		hash     = block.HashNoNonce().Bytes()
		diff     = block.Difficulty()
		target   = new(big.Int).Div(maxUint256, diff)
		dataset  = ethash.dataset(block.NumberU64())
		rand     = rand.New(rand.NewSource(time.Now().UnixNano()))
		nonce    = uint64(rand.Int63())
		attempts int64
	)
	// Start generating random nonces until we abort or find a good one
	for {
		select {
		case <-stop:
			// Mining terminated, update stats and abort
			ethash.hashrate.Mark(attempts)
			return 0, nil

		default:
			// We don't have to update hash rate on every nonce, so update after 2^X nonces
			attempts++
			if (attempts % (1 << 15)) == 0 {
				ethash.hashrate.Mark(attempts)
				attempts = 0
			}
			// Compute the PoW value of this nonce
			digest, result := hashimotoFull(dataset, hash, nonce)
			if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
				return nonce, digest
			}
			nonce++
		}
	}
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.datasets[epoch], (*dataset)(nil)
	if current == nil {
		// No in-memory dataset, evict the oldest if the dataset limit was reached
		for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
			var evict *dataset
			for _, dataset := range ethash.datasets {
				if evict == nil || evict.used.After(dataset.used) {
					evict = dataset
				}
			}
			delete(ethash.datasets, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new dataset pre-generated, use that, otherwise create a new one
		if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
			log.Trace("Using pre-generated dataset", "epoch", epoch)
			current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
			ethash.fdataset = nil
		} else {
			log.Trace("Requiring new ethash dataset", "epoch", epoch)
			current = &dataset{epoch: epoch}
		}
		ethash.datasets[epoch] = current

		// If we just used up the future dataset, or need a refresh, regenerate
		if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
			if ethash.fdataset != nil {
				ethash.fdataset.release()
			}
			log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
			future = &dataset{epoch: epoch + 1}
			ethash.fdataset = future
		}
	}
	current.used = time.Now()
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the dataset
	current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future dataset, now's a good time to regenerate it
	if future != nil {
		go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
	}
	return current.dataset
}
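// exampleMine is an illustrative addition, not part of the original file: a
// minimal sketch of driving Search with a caller-controlled stop channel. The
// timeout value is an assumption; real miners close the channel when a new
// chain head arrives instead.
func exampleMine(ethash *Ethash, block Block) (uint64, []byte) {
	stop := make(chan struct{})

	// Abort the search after one minute if no valid nonce has been found.
	timer := time.AfterFunc(time.Minute, func() { close(stop) })
	defer timer.Stop()

	// Search returns (0, nil) when aborted via the stop channel.
	return ethash.Search(block, stop)
}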
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (ethash *Ethash) Hashrate() float64 {
	return ethash.hashrate.Rate1()
}

// EthashSeedHash is the seed to use for generating a verification cache and the
// mining dataset.
func EthashSeedHash(block uint64) []byte {
	return seedHash(block)
}
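// exampleSameEpochSeed is an illustrative addition, not part of the original
// file: a minimal sketch showing that the seed only changes at epoch
// boundaries, so any two blocks in the same epochLength-sized window share
// their verification cache and mining dataset.
func exampleSameEpochSeed(a, b uint64) bool {
	if a/epochLength != b/epochLength {
		return false
	}
	return bytes.Equal(EthashSeedHash(a), EthashSeedHash(b))
}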