// Copyright 2018 Wanchain Foundation Ltd
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
19 package ethash 20 21 import ( 22 "errors" 23 "fmt" 24 "math" 25 "math/big" 26 "math/rand" 27 "os" 28 "path/filepath" 29 "reflect" 30 "runtime" 31 "strconv" 32 "sync" 33 "time" 34 "unsafe" 35 36 mmap "github.com/edsrzf/mmap-go" 37 lruCache "github.com/hashicorp/golang-lru" 38 metrics "github.com/rcrowley/go-metrics" 39 //"github.com/wanchain/go-wanchain/common" 40 "github.com/wanchain/go-wanchain/consensus" 41 //"github.com/wanchain/go-wanchain/ethdb" 42 "github.com/wanchain/go-wanchain/log" 43 "github.com/wanchain/go-wanchain/rpc" 44 "github.com/hashicorp/golang-lru/simplelru" 45 "github.com/wanchain/go-wanchain/ethdb" 46 "github.com/wanchain/go-wanchain/common" 47 48 ) 49 50 var ErrInvalidDumpMagic = errors.New("invalid dump magic") 51 52 var ( 53 // maxUint256 is a big integer representing 2^256-1 54 maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) 55 56 // sharedEthash is a full instance that can be shared between multiple users. 57 sharedEthash = NewWithCfg(Config{"", 3, 0, "", 1, 0, ModeNormal}) 58 59 // algorithmRevision is the data structure version used for file naming. 60 algorithmRevision = 23 61 62 // dumpMagic is a dataset dump header to sanity check a data dump. 63 dumpMagic = []uint32{0xbaddcafe, 0xfee1dead} 64 ) 65 66 // isLittleEndian returns whether the local system is running in little or big 67 // endian byte order. 68 func isLittleEndian() bool { 69 n := uint32(0x01020304) 70 return *(*byte)(unsafe.Pointer(&n)) == 0x04 71 } 72 73 // memoryMap tries to memory map a file of uint32s for read only access. 
74 func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) { 75 file, err := os.OpenFile(path, os.O_RDONLY, 0644) 76 if err != nil { 77 return nil, nil, nil, err 78 } 79 mem, buffer, err := memoryMapFile(file, false) 80 if err != nil { 81 file.Close() 82 return nil, nil, nil, err 83 } 84 for i, magic := range dumpMagic { 85 if buffer[i] != magic { 86 mem.Unmap() 87 file.Close() 88 return nil, nil, nil, ErrInvalidDumpMagic 89 } 90 } 91 return file, mem, buffer[len(dumpMagic):], err 92 } 93 94 // memoryMapFile tries to memory map an already opened file descriptor. 95 func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) { 96 // Try to memory map the file 97 flag := mmap.RDONLY 98 if write { 99 flag = mmap.RDWR 100 } 101 mem, err := mmap.Map(file, flag, 0) 102 if err != nil { 103 return nil, nil, err 104 } 105 // Yay, we managed to memory map the file, here be dragons 106 header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem)) 107 header.Len /= 4 108 header.Cap /= 4 109 110 return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil 111 } 112 113 // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write 114 // access, fill it with the data from a generator and then move it into the final 115 // path requested. 116 func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { 117 // Ensure the data folder exists 118 if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 119 return nil, nil, nil, err 120 } 121 // Create a huge temporary empty file to fill with data 122 temp := path + "." 
+ strconv.Itoa(rand.Int()) 123 124 dump, err := os.Create(temp) 125 if err != nil { 126 return nil, nil, nil, err 127 } 128 if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { 129 return nil, nil, nil, err 130 } 131 // Memory map the file for writing and fill it with the generator 132 mem, buffer, err := memoryMapFile(dump, true) 133 if err != nil { 134 dump.Close() 135 return nil, nil, nil, err 136 } 137 copy(buffer, dumpMagic) 138 139 data := buffer[len(dumpMagic):] 140 generator(data) 141 142 if err := mem.Unmap(); err != nil { 143 return nil, nil, nil, err 144 } 145 if err := dump.Close(); err != nil { 146 return nil, nil, nil, err 147 } 148 if err := os.Rename(temp, path); err != nil { 149 return nil, nil, nil, err 150 } 151 return memoryMap(path) 152 } 153 154 // lru tracks caches or datasets by their last use time, keeping at most N of them. 155 type lru struct { 156 what string 157 new func(epoch uint64) interface{} 158 mu sync.Mutex 159 // Items are kept in a LRU cache, but there is a special case: 160 // We always keep an item for (highest seen epoch) + 1 as the 'future item'. 161 cache *simplelru.LRU 162 future uint64 163 futureItem interface{} 164 } 165 166 // newlru create a new least-recently-used cache for ither the verification caches 167 // or the mining datasets. 168 func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru { 169 if maxItems <= 0 { 170 maxItems = 1 171 } 172 cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) { 173 log.Trace("Evicted ethash "+what, "epoch", key) 174 }) 175 return &lru{what: what, new: new, cache: cache} 176 } 177 178 // get retrieves or creates an item for the given epoch. The first return value is always 179 // non-nil. The second return value is non-nil if lru thinks that an item will be useful in 180 // the near future. 
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		// A cache miss may still be served by the stashed 'future item'
		// generated ahead of time for this epoch.
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}

// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64     // Epoch for which this cache is relevant
	dump  *os.File   // File descriptor of the memory mapped cache
	mmap  mmap.MMap  // Memory map itself to unmap before releasing
	cache []uint32   // The actual cache data content (may be memory mapped)
	once  sync.Once  // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
// dir is the on-disk storage directory ("" keeps everything in memory),
// limit bounds how many older epoch files are retained on disk, and test
// shrinks the cache to 1KiB for fast unit testing.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			// Fall back to a fully in-memory cache so verification still works.
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
283 type dataset struct { 284 epoch uint64 // Epoch for which this cache is relevant 285 dump *os.File // File descriptor of the memory mapped cache 286 mmap mmap.MMap // Memory map itself to unmap before releasing 287 dataset []uint32 // The actual cache data content 288 once sync.Once // Ensures the cache is generated only once 289 } 290 291 // newDataset creates a new ethash mining dataset and returns it as a plain Go 292 // interface to be usable in an LRU cache. 293 func newDataset(epoch uint64) interface{} { 294 return &dataset{epoch: epoch} 295 } 296 297 // generate ensures that the dataset content is generated before use. 298 func (d *dataset) generate(dir string, limit int, test bool) { 299 d.once.Do(func() { 300 csize := cacheSize(d.epoch*epochLength + 1) 301 dsize := datasetSize(d.epoch*epochLength + 1) 302 seed := seedHash(d.epoch*epochLength + 1) 303 if test { 304 csize = 1024 305 dsize = 32 * 1024 306 } 307 // If we don't store anything on disk, generate and return 308 if dir == "" { 309 cache := make([]uint32, csize/4) 310 generateCache(cache, d.epoch, seed) 311 312 d.dataset = make([]uint32, dsize/4) 313 generateDataset(d.dataset, d.epoch, cache) 314 } 315 // Disk storage is needed, this will get fancy 316 var endian string 317 if !isLittleEndian() { 318 endian = ".be" 319 } 320 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 321 logger := log.New("epoch", d.epoch) 322 323 // We're about to mmap the file, ensure that the mapping is cleaned up when the 324 // cache becomes unused. 
325 runtime.SetFinalizer(d, (*dataset).finalizer) 326 327 // Try to load the file from disk and memory map it 328 var err error 329 d.dump, d.mmap, d.dataset, err = memoryMap(path) 330 if err == nil { 331 logger.Debug("Loaded old ethash dataset from disk") 332 return 333 } 334 logger.Debug("Failed to load old ethash dataset", "err", err) 335 336 // No previous dataset available, create a new dataset file to fill 337 cache := make([]uint32, csize/4) 338 generateCache(cache, d.epoch, seed) 339 340 d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) 341 if err != nil { 342 logger.Error("Failed to generate mapped ethash dataset", "err", err) 343 344 d.dataset = make([]uint32, dsize/2) 345 generateDataset(d.dataset, d.epoch, cache) 346 } 347 // Iterate over all previous instances and delete old ones 348 for ep := int(d.epoch) - limit; ep >= 0; ep-- { 349 seed := seedHash(uint64(ep)*epochLength + 1) 350 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 351 os.Remove(path) 352 } 353 }) 354 } 355 356 // finalizer closes any file handlers and memory maps open. 357 func (d *dataset) finalizer() { 358 if d.mmap != nil { 359 d.mmap.Unmap() 360 d.dump.Close() 361 d.mmap, d.dump = nil, nil 362 } 363 } 364 365 // MakeCache generates a new ethash cache and optionally stores it to disk. 366 func MakeCache(block uint64, dir string) { 367 c := cache{epoch: block / epochLength} 368 c.generate(dir, math.MaxInt32, false) 369 } 370 371 // MakeDataset generates a new ethash dataset and optionally stores it to disk. 372 func MakeDataset(block uint64, dir string) { 373 d := dataset{epoch: block / epochLength} 374 d.generate(dir, math.MaxInt32, false) 375 } 376 377 // Mode defines the type and amount of PoW verification an ethash engine makes. 
type Mode uint

const (
	ModeNormal Mode = iota // Full verification with real PoW
	ModeShared             // Use the process-wide shared ethash instance
	ModeTest               // Tiny caches/datasets for unit tests
	ModeFake               // Accept all seals (optionally fail/delay specific blocks)
	ModeFullFake           // Skip all consensus rule checks entirely
)

// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir       string // Directory for on-disk verification caches ("" disables)
	CachesInMem    int    // Number of verification caches kept in memory
	CachesOnDisk   int    // Number of verification caches kept on disk
	DatasetDir     string // Directory for on-disk mining DAGs ("" disables)
	DatasetsInMem  int    // Number of mining datasets kept in memory
	DatasetsOnDisk int    // Number of mining datasets kept on disk
	PowMode        Mode   // Amount/type of PoW verification performed
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields

	db      ethdb.Database     // Chain database (Wanchain addition; used by the signing hooks)
	recents *lruCache.ARCCache // Recently verified data cache (Wanchain addition)
	signer  common.Address     // Signing address (Wanchain addition)
	signFn  SignerFn           // Signing callback (Wanchain addition; declared elsewhere)
}

// New creates a full sized ethash PoW scheme.
427 //func New(config Config) *Ethash { 428 func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int, db ethdb.Database) *Ethash { 429 //config := Config{CachesInMem:cachesinmem,CacheDir:cachedir,CachesOnDisk:cachesondisk} 430 431 config := Config{ 432 CacheDir: cachedir, 433 CachesInMem: cachesinmem, 434 CachesOnDisk: cachesondisk, 435 DatasetDir: dagdir, 436 DatasetsInMem: dagsinmem, 437 DatasetsOnDisk: dagsondisk, 438 } 439 440 if config.CachesInMem <= 0 { 441 log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) 442 config.CachesInMem = 1 443 } 444 if config.CacheDir != "" && config.CachesOnDisk > 0 { 445 log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) 446 } 447 if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { 448 log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) 449 } 450 451 recents, _ := lruCache.NewARC(256) 452 453 return &Ethash{ 454 config: config, 455 caches: newlru("cache", config.CachesInMem, newCache), 456 datasets: newlru("dataset", config.DatasetsInMem, newDataset), 457 update: make(chan struct{}), 458 hashrate: metrics.NewMeter(), 459 db: db, 460 recents: recents, 461 } 462 } 463 464 func NewWithCfg(config Config) *Ethash { 465 if config.CachesInMem <= 0 { 466 log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) 467 config.CachesInMem = 1 468 } 469 if config.CacheDir != "" && config.CachesOnDisk > 0 { 470 log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) 471 } 472 if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { 473 log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) 474 } 475 return &Ethash{ 476 config: config, 477 caches: newlru("cache", config.CachesInMem, newCache), 478 datasets: newlru("dataset", 
config.DatasetsInMem, newDataset), 479 update: make(chan struct{}), 480 hashrate: metrics.NewMeter(), 481 } 482 } 483 // NewTester creates a small sized ethash PoW scheme useful only for testing 484 // purposes. 485 func NewTester(db ethdb.Database) *Ethash { 486 // create a signer cache 487 recents, _ := lruCache.NewARC(256) 488 489 return &Ethash{ 490 491 config:Config{ CachesInMem: 1,PowMode: ModeFake}, 492 update: make(chan struct{}), 493 hashrate: metrics.NewMeter(), 494 recents: recents, 495 db: db, 496 } 497 498 //return NewWithCfg(Config{CachesInMem: 1, PowMode: ModeTest}) 499 } 500 501 // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts 502 // all blocks' seal as valid, though they still have to conform to the Ethereum 503 // consensus rules. 504 func NewFaker(db ethdb.Database) *Ethash { 505 recents, _ := lruCache.NewARC(256) 506 return &Ethash{ 507 recents: recents, 508 config: Config{ 509 PowMode: ModeFake, 510 }, 511 db: db, 512 } 513 } 514 515 516 // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that 517 // accepts all blocks as valid apart from the single one specified, though they 518 // still have to conform to the Ethereum consensus rules. 519 func NewFakeFailer(fail uint64, db ethdb.Database) *Ethash { 520 recents, _ := lruCache.NewARC(256) 521 522 return &Ethash{ 523 config: Config{ 524 PowMode: ModeFake, 525 }, 526 fakeFail: fail, 527 recents: recents, 528 db: db, 529 } 530 } 531 532 // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that 533 // accepts all blocks as valid, but delays verifications by some time, though 534 // they still have to conform to the Ethereum consensus rules. 
535 536 func NewFakeDelayer(delay time.Duration, db ethdb.Database) *Ethash { 537 538 recents, _ := lruCache.NewARC(256) 539 540 return &Ethash{ 541 config: Config{ 542 PowMode: ModeFake, 543 }, 544 fakeDelay: delay, 545 recents: recents, 546 db: db, 547 } 548 } 549 550 // NewFullFaker creates an ethash consensus engine with a full fake scheme that 551 // accepts all blocks as valid, without checking any consensus rules whatsoever. 552 553 554 func NewFullFaker(db ethdb.Database) *Ethash { 555 recents, _ := lruCache.NewARC(256) 556 return &Ethash{ 557 config: Config{ 558 PowMode: ModeFullFake, 559 }, 560 db: db, 561 recents: recents, 562 } 563 } 564 565 // NewShared creates a full sized ethash PoW shared between all requesters running 566 // in the same process. 567 func NewShared() *Ethash { 568 return &Ethash{shared: sharedEthash} 569 } 570 571 // cache tries to retrieve a verification cache for the specified block number 572 // by first checking against a list of in-memory caches, then against caches 573 // stored on disk, and finally generating one if none can be found. 574 func (ethash *Ethash) cache(block uint64) *cache { 575 epoch := block / epochLength 576 currentI, futureI := ethash.caches.get(epoch) 577 current := currentI.(*cache) 578 579 // Wait for generation finish. 580 current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) 581 582 // If we need a new future cache, now's a good time to regenerate it. 583 if futureI != nil { 584 future := futureI.(*cache) 585 go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest) 586 } 587 return current 588 } 589 590 // dataset tries to retrieve a mining dataset for the specified block number 591 // by first checking against a list of in-memory datasets, then against DAGs 592 // stored on disk, and finally generating one if none can be found. 
593 func (ethash *Ethash) dataset(block uint64) *dataset { 594 epoch := block / epochLength 595 currentI, futureI := ethash.datasets.get(epoch) 596 current := currentI.(*dataset) 597 598 // Wait for generation finish. 599 current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) 600 601 // If we need a new future dataset, now's a good time to regenerate it. 602 if futureI != nil { 603 future := futureI.(*dataset) 604 go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) 605 } 606 607 return current 608 } 609 610 // Threads returns the number of mining threads currently enabled. This doesn't 611 // necessarily mean that mining is running! 612 func (ethash *Ethash) Threads() int { 613 ethash.lock.Lock() 614 defer ethash.lock.Unlock() 615 616 return ethash.threads 617 } 618 619 // SetThreads updates the number of mining threads currently enabled. Calling 620 // this method does not start mining, only sets the thread count. If zero is 621 // specified, the miner will use all cores of the machine. Setting a thread 622 // count below zero is allowed and will cause the miner to idle, without any 623 // work being done. 624 func (ethash *Ethash) SetThreads(threads int) { 625 ethash.lock.Lock() 626 defer ethash.lock.Unlock() 627 628 // If we're running a shared PoW, set the thread count on that instead 629 if ethash.shared != nil { 630 ethash.shared.SetThreads(threads) 631 return 632 } 633 // Update the threads and ping any running seal to pull in any changes 634 ethash.threads = threads 635 select { 636 case ethash.update <- struct{}{}: 637 default: 638 } 639 } 640 641 // Hashrate implements PoW, returning the measured rate of the search invocations 642 // per second over the last minute. 643 func (ethash *Ethash) Hashrate() float64 { 644 return ethash.hashrate.Rate1() 645 } 646 647 // APIs implements consensus.Engine, returning the user facing RPC APIs. 
Currently 648 // that is empty. 649 func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API { 650 return nil 651 } 652 653 // SeedHash is the seed to use for generating a verification cache and the mining 654 // dataset. 655 func SeedHash(block uint64) []byte { 656 return seedHash(block) 657 }