github.com/waltonchain/waltonchain_gwtc_src@v1.1.4-0.20201225072101-8a298c95a819/consensus/ethash/ethash.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-wtc library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-wtc library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"sync"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	metrics "github.com/rcrowley/go-metrics"
	"github.com/wtc/go-wtc/consensus"
	"github.com/wtc/go-wtc/log"
	"github.com/wtc/go-wtc/rpc"
)

// ErrInvalidDumpMagic is returned when a memory mapped cache or dataset file
// fails the dumpMagic sanity check.
var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New("", 3, 0, "", 1, 0, false, 12125, 10240)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}

// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
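// The slice-header rewrite above is the standard, if unsafe, trick for viewing
// a byte-backed memory map as a []uint32 without copying. A minimal sketch of
// the same idea, using a hypothetical helper name that is not part of this
// package:
//
//	// bytesToUint32s reinterprets b as a []uint32. The input must outlive the
//	// result and its byte length is assumed to be a multiple of 4.
//	func bytesToUint32s(b []byte) []uint32 {
//		header := *(*reflect.SliceHeader)(unsafe.Pointer(&b))
//		header.Len /= 4 // lengths and capacities count elements, not bytes
//		header.Cap /= 4
//		return *(*[]uint32)(unsafe.Pointer(&header))
//	}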
// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path)
}
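// A rough usage sketch for memoryMapAndGenerate; the path, size and generator
// below are made up for illustration. The generator only ever sees the mapped
// buffer minus the dumpMagic prefix, and the file appears at the final path
// only after the temporary file has been fully written, unmapped and renamed.
//
//	file, mem, data, err := memoryMapAndGenerate("/tmp/ethash/demo", 64*1024,
//		func(buffer []uint32) {
//			for i := range buffer {
//				buffer[i] = uint32(i) // dummy payload instead of a real cache
//			}
//		})
//	if err == nil {
//		_ = data // read-only mapped view of the generated content
//		mem.Unmap()
//		file.Close()
//	}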
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64 // Epoch for which this cache is relevant

	dump *os.File  // File descriptor of the memory mapped cache
	mmap mmap.MMap // Memory map itself to unmap before releasing

	cache []uint32   // The actual cache data content (may be memory mapped)
	used  time.Time  // Timestamp of the last use for smarter eviction
	once  sync.Once  // Ensures the cache is generated only once
	lock  sync.Mutex // Ensures thread safety for updating the usage time
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		// If we have a testing cache, generate and return
		if test {
			c.cache = make([]uint32, 1024/4)
			generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
			return
		}
		// If we don't store anything on disk, generate and return
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)

		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// release closes any file handlers and memory maps open.
func (c *cache) release() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.mmap = nil
	}
	if c.dump != nil {
		c.dump.Close()
		c.dump = nil
	}
}
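// For reference, the on-disk names produced above follow a fixed scheme: the
// algorithm revision plus the first 8 bytes of the epoch seed in hex, with a
// ".be" suffix on big-endian hosts. The hex digits below are invented for
// illustration:
//
//	name := fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], "")
//	// e.g. "cache-R23-0123456789abcdef", or
//	// "cache-R23-0123456789abcdef.be" on a big-endian machine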
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch uint64 // Epoch for which this dataset is relevant

	dump *os.File  // File descriptor of the memory mapped dataset
	mmap mmap.MMap // Memory map itself to unmap before releasing

	dataset []uint32   // The actual dataset content (may be memory mapped)
	used    time.Time  // Timestamp of the last use for smarter eviction
	once    sync.Once  // Ensures the dataset is generated only once
	lock    sync.Mutex // Ensures thread safety for updating the usage time
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		// If we have a testing dataset, generate and return
		if test {
			cache := make([]uint32, 1024/4)
			generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))

			d.dataset = make([]uint32, 32*1024/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// If we don't store anything on disk, generate and return
		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)

		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// release closes any file handlers and memory maps open.
func (d *dataset) release() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.mmap = nil
	}
	if d.dump != nil {
		d.dump.Close()
		d.dump = nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false)
	c.release()
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false)
	d.release()
}
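// Both helpers are convenient for pre-generating per-epoch artifacts before a
// node or miner starts, e.g. from a CLI wrapper; note that passing
// math.MaxInt32 as the limit means generation never prunes older files. A
// hedged sketch, with an arbitrary epoch and directory:
//
//	MakeCache(epochLength*128, "/data/ethash")   // verification cache for epoch 128
//	MakeDataset(epochLength*128, "/data/ethash") // full mining DAG for epoch 128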
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	cachedir     string // Data directory to store the verification caches
	cachesinmem  int    // Number of caches to keep in memory
	cachesondisk int    // Number of caches to keep on disk
	dagdir       string // Data directory to store full mining datasets
	dagsinmem    int    // Number of mining datasets to keep in memory
	dagsondisk   int    // Number of mining datasets to keep on disk
	GPUPort      int64  // Port handed to GPU mining code via IsGPU
	GPUGetPort   int64  // Port handed to GPU mining code via IsGPU

	caches   map[uint64]*cache   // In memory caches to avoid regenerating too often
	fcache   *cache              // Pre-generated cache for the estimated future epoch
	datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
	fdataset *dataset            // Pre-generated dataset for the estimated future epoch

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	tester    bool          // Flag whether to use a smaller test dataset
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	GPUMode   bool          // Flag whether GPU mode is enabled, reported via IsGPU
	fakeMode  bool          // Flag whether to disable PoW checking
	fakeFull  bool          // Flag whether to disable all consensus rules
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}

// New creates a full sized ethash PoW scheme.
func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int, gpuMode bool, gpuPort int64, gpuGetPort int64) *Ethash {
	if cachesinmem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
		cachesinmem = 1
	}
	if cachedir != "" && cachesondisk > 0 {
		// log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
	}
	if dagdir != "" && dagsondisk > 0 {
		// log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
	}
	return &Ethash{
		cachedir:     cachedir,
		cachesinmem:  cachesinmem,
		cachesondisk: cachesondisk,
		dagdir:       dagdir,
		dagsinmem:    dagsinmem,
		dagsondisk:   dagsondisk,
		caches:       make(map[uint64]*cache),
		datasets:     make(map[uint64]*dataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeter(),
		GPUMode:      gpuMode,
		GPUPort:      gpuPort,
		GPUGetPort:   gpuGetPort,
	}
}
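// A hedged construction sketch mirroring the sharedEthash arguments above;
// the values are illustrative, not recommended defaults:
//
//	engine := New(
//		"",    // cachedir: empty, so caches stay in memory only
//		3, 0,  // cachesinmem, cachesondisk
//		"",    // dagdir: empty, so DAGs stay in memory only
//		1, 0,  // dagsinmem, dagsondisk
//		false, // gpuMode
//		12125, // gpuPort
//		10240, // gpuGetPort
//	)
//	_ = engine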
// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester() *Ethash {
	return &Ethash{
		cachesinmem: 1,
		caches:      make(map[uint64]*cache),
		datasets:    make(map[uint64]*dataset),
		tester:      true,
		update:      make(chan struct{}),
		hashrate:    metrics.NewMeter(),
	}
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Wtc
// consensus rules.
func NewFaker() *Ethash {
	return &Ethash{fakeMode: true}
}

// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Wtc consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{fakeMode: true, fakeFail: fail}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Wtc consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{fakeMode: true, fakeDelay: delay}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{fakeMode: true, fakeFull: true}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.caches[epoch], (*cache)(nil)
	if current == nil {
		// No in-memory cache, evict the oldest if the cache limit was reached
		for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
			var evict *cache
			for _, cache := range ethash.caches {
				if evict == nil || evict.used.After(cache.used) {
					evict = cache
				}
			}
			delete(ethash.caches, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new cache pre-generated, use that, otherwise create a new one
		if ethash.fcache != nil && ethash.fcache.epoch == epoch {
			log.Trace("Using pre-generated cache", "epoch", epoch)
			current, ethash.fcache = ethash.fcache, nil
		} else {
			log.Trace("Requiring new ethash cache", "epoch", epoch)
			current = &cache{epoch: epoch}
		}
		ethash.caches[epoch] = current

		// If we just used up the future cache, or need a refresh, regenerate
		if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
			if ethash.fcache != nil {
				ethash.fcache.release()
			}
			log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
			future = &cache{epoch: epoch + 1}
			ethash.fcache = future
		}
		// New current cache, set its initial timestamp
		current.used = time.Now()
	}
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the cache
	current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future cache, now's a good time to regenerate it
	if future != nil {
		go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
	}
	return current.cache
}
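// Callers never juggle epochs directly: verification code asks for the cache
// by block number and the method above handles eviction and future-epoch
// pre-generation. A minimal hedged sketch, where blockNumber is a placeholder:
//
//	cache := ethash.cache(blockNumber) // blocks until the epoch's cache is generated
//	_ = cache                          // feed into the light verification routine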
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.datasets[epoch], (*dataset)(nil)
	if current == nil {
		// No in-memory dataset, evict the oldest if the dataset limit was reached
		for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
			var evict *dataset
			for _, dataset := range ethash.datasets {
				if evict == nil || evict.used.After(dataset.used) {
					evict = dataset
				}
			}
			delete(ethash.datasets, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new dataset pre-generated, use that, otherwise create a new one
		if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
			log.Trace("Using pre-generated dataset", "epoch", epoch)
			current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
			ethash.fdataset = nil
		} else {
			log.Trace("Requiring new ethash dataset", "epoch", epoch)
			current = &dataset{epoch: epoch}
		}
		ethash.datasets[epoch] = current

		// If we just used up the future dataset, or need a refresh, regenerate
		if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
			if ethash.fdataset != nil {
				ethash.fdataset.release()
			}
			log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
			future = &dataset{epoch: epoch + 1}
			ethash.fdataset = future
		}
		// New current dataset, set its initial timestamp
		current.used = time.Now()
	}
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the dataset
	current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future dataset, now's a good time to regenerate it
	if future != nil {
		go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
	}
	return current.dataset
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, it only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}
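// The thread-count semantics follow the doc comment above; a short usage
// sketch (engine is a placeholder *Ethash):
//
//	engine.SetThreads(0)  // mine on all cores once mining starts
//	engine.SetThreads(8)  // mine on exactly 8 threads
//	engine.SetThreads(-1) // keep the miner idle, doing no work at all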
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (ethash *Ethash) Hashrate() float64 {
	return ethash.hashrate.Rate1()
}

// IsGPU returns the GPU mining flag and the two GPU ports configured at
// construction time.
func (ethash *Ethash) IsGPU() (bool, int64, int64) {
	return ethash.GPUMode, ethash.GPUPort, ethash.GPUGetPort
}

// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
// that is empty.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
	return nil
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}
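// In stock ethash the seed is 32 zero bytes for epoch 0 and is then hashed
// with keccak-256 once per subsequent epoch. Assuming this fork keeps that
// scheme, the relationship looks like:
//
//	seed0 := SeedHash(1)             // block 1 falls in epoch 0: 32 zero bytes
//	seed1 := SeedHash(epochLength+1) // epoch 1: one further keccak-256 round
//	_, _ = seed0, seed1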