github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/consensus/ethash/ethash.go (about) 1 // Copyright 2017 The Spectrum Authors 2 // This file is part of the Spectrum library. 3 // 4 // The Spectrum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The Spectrum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package ethash implements the ethash proof-of-work consensus engine. 18 package ethash 19 20 import ( 21 "errors" 22 "fmt" 23 "math" 24 "math/big" 25 "math/rand" 26 "os" 27 "path/filepath" 28 "reflect" 29 "strconv" 30 "sync" 31 "time" 32 "unsafe" 33 34 "github.com/SmartMeshFoundation/Spectrum/consensus" 35 "github.com/SmartMeshFoundation/Spectrum/log" 36 "github.com/SmartMeshFoundation/Spectrum/rpc" 37 mmap "github.com/edsrzf/mmap-go" 38 metrics "github.com/rcrowley/go-metrics" 39 ) 40 41 var ErrInvalidDumpMagic = errors.New("invalid dump magic") 42 43 var ( 44 // maxUint256 is a big integer representing 2^256-1 45 maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) 46 47 // sharedEthash is a full instance that can be shared between multiple users. 48 sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}) 49 50 // algorithmRevision is the data structure version used for file naming. 51 algorithmRevision = 23 52 53 // dumpMagic is a dataset dump header to sanity check a data dump. 
54 dumpMagic = []uint32{0xbaddcafe, 0xfee1dead} 55 ) 56 57 // isLittleEndian returns whether the local system is running in little or big 58 // endian byte order. 59 func isLittleEndian() bool { 60 n := uint32(0x01020304) 61 return *(*byte)(unsafe.Pointer(&n)) == 0x04 62 } 63 64 // memoryMap tries to memory map a file of uint32s for read only access. 65 func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) { 66 file, err := os.OpenFile(path, os.O_RDONLY, 0644) 67 if err != nil { 68 return nil, nil, nil, err 69 } 70 mem, buffer, err := memoryMapFile(file, false) 71 if err != nil { 72 file.Close() 73 return nil, nil, nil, err 74 } 75 for i, magic := range dumpMagic { 76 if buffer[i] != magic { 77 mem.Unmap() 78 file.Close() 79 return nil, nil, nil, ErrInvalidDumpMagic 80 } 81 } 82 return file, mem, buffer[len(dumpMagic):], err 83 } 84 85 // memoryMapFile tries to memory map an already opened file descriptor. 86 func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) { 87 // Try to memory map the file 88 flag := mmap.RDONLY 89 if write { 90 flag = mmap.RDWR 91 } 92 mem, err := mmap.Map(file, flag, 0) 93 if err != nil { 94 return nil, nil, err 95 } 96 // Yay, we managed to memory map the file, here be dragons 97 header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem)) 98 header.Len /= 4 99 header.Cap /= 4 100 101 return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil 102 } 103 104 // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write 105 // access, fill it with the data from a generator and then move it into the final 106 // path requested. 107 func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) { 108 // Ensure the data folder exists 109 if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 110 return nil, nil, nil, err 111 } 112 // Create a huge temporary empty file to fill with data 113 temp := path + "." 
+ strconv.Itoa(rand.Int()) 114 115 dump, err := os.Create(temp) 116 if err != nil { 117 return nil, nil, nil, err 118 } 119 if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { 120 return nil, nil, nil, err 121 } 122 // Memory map the file for writing and fill it with the generator 123 mem, buffer, err := memoryMapFile(dump, true) 124 if err != nil { 125 dump.Close() 126 return nil, nil, nil, err 127 } 128 copy(buffer, dumpMagic) 129 130 data := buffer[len(dumpMagic):] 131 generator(data) 132 133 if err := mem.Unmap(); err != nil { 134 return nil, nil, nil, err 135 } 136 if err := dump.Close(); err != nil { 137 return nil, nil, nil, err 138 } 139 if err := os.Rename(temp, path); err != nil { 140 return nil, nil, nil, err 141 } 142 return memoryMap(path) 143 } 144 145 // cache wraps an ethash cache with some metadata to allow easier concurrent use. 146 type cache struct { 147 epoch uint64 // Epoch for which this cache is relevant 148 149 dump *os.File // File descriptor of the memory mapped cache 150 mmap mmap.MMap // Memory map itself to unmap before releasing 151 152 cache []uint32 // The actual cache data content (may be memory mapped) 153 used time.Time // Timestamp of the last use for smarter eviction 154 once sync.Once // Ensures the cache is generated only once 155 lock sync.Mutex // Ensures thread safety for updating the usage time 156 } 157 158 // generate ensures that the cache content is generated before use. 
// generate ensures the cache content exists exactly once: a tiny 1KB cache in
// test mode, a purely in-memory cache when dir is empty, or a memory mapped
// file on disk otherwise. limit bounds how many older epochs' cache files are
// kept on disk.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		// If we have a testing cache, generate and return
		if test {
			c.cache = make([]uint32, 1024/4)
			generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
			return
		}
		// If we don't store anything on disk, generate and return
		// Size and seed are derived from the first block of this epoch.
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)

		if dir == "" {
			// size is in bytes; the cache holds uint32 words, hence /4.
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		// Big-endian hosts get a ".be" suffix so dumps are not shared across
		// byte orders (the mmap'd words are stored in native order).
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			// NOTE(review): this early return skips the stale-file pruning
			// below, so old epochs are only cleaned up on regeneration.
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			// Disk generation failed (e.g. read-only filesystem); fall back
			// to a plain in-memory cache so mining/verification still works.
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// release closes any file handlers and memory maps open.
211 func (c *cache) release() { 212 if c.mmap != nil { 213 c.mmap.Unmap() 214 c.mmap = nil 215 } 216 if c.dump != nil { 217 c.dump.Close() 218 c.dump = nil 219 } 220 } 221 222 // dataset wraps an ethash dataset with some metadata to allow easier concurrent use. 223 type dataset struct { 224 epoch uint64 // Epoch for which this cache is relevant 225 226 dump *os.File // File descriptor of the memory mapped cache 227 mmap mmap.MMap // Memory map itself to unmap before releasing 228 229 dataset []uint32 // The actual cache data content 230 used time.Time // Timestamp of the last use for smarter eviction 231 once sync.Once // Ensures the cache is generated only once 232 lock sync.Mutex // Ensures thread safety for updating the usage time 233 } 234 235 // generate ensures that the dataset content is generated before use. 236 func (d *dataset) generate(dir string, limit int, test bool) { 237 d.once.Do(func() { 238 // If we have a testing dataset, generate and return 239 if test { 240 cache := make([]uint32, 1024/4) 241 generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1)) 242 243 d.dataset = make([]uint32, 32*1024/4) 244 generateDataset(d.dataset, d.epoch, cache) 245 246 return 247 } 248 // If we don't store anything on disk, generate and return 249 csize := cacheSize(d.epoch*epochLength + 1) 250 dsize := datasetSize(d.epoch*epochLength + 1) 251 seed := seedHash(d.epoch*epochLength + 1) 252 253 if dir == "" { 254 cache := make([]uint32, csize/4) 255 generateCache(cache, d.epoch, seed) 256 257 d.dataset = make([]uint32, dsize/4) 258 generateDataset(d.dataset, d.epoch, cache) 259 } 260 // Disk storage is needed, this will get fancy 261 var endian string 262 if !isLittleEndian() { 263 endian = ".be" 264 } 265 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 266 logger := log.New("epoch", d.epoch) 267 268 // Try to load the file from disk and memory map it 269 var err error 270 d.dump, d.mmap, d.dataset, err = 
memoryMap(path) 271 if err == nil { 272 logger.Debug("Loaded old ethash dataset from disk") 273 return 274 } 275 logger.Debug("Failed to load old ethash dataset", "err", err) 276 277 // No previous dataset available, create a new dataset file to fill 278 cache := make([]uint32, csize/4) 279 generateCache(cache, d.epoch, seed) 280 281 d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) 282 if err != nil { 283 logger.Error("Failed to generate mapped ethash dataset", "err", err) 284 285 d.dataset = make([]uint32, dsize/2) 286 generateDataset(d.dataset, d.epoch, cache) 287 } 288 // Iterate over all previous instances and delete old ones 289 for ep := int(d.epoch) - limit; ep >= 0; ep-- { 290 seed := seedHash(uint64(ep)*epochLength + 1) 291 path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) 292 os.Remove(path) 293 } 294 }) 295 } 296 297 // release closes any file handlers and memory maps open. 298 func (d *dataset) release() { 299 if d.mmap != nil { 300 d.mmap.Unmap() 301 d.mmap = nil 302 } 303 if d.dump != nil { 304 d.dump.Close() 305 d.dump = nil 306 } 307 } 308 309 // MakeCache generates a new ethash cache and optionally stores it to disk. 310 func MakeCache(block uint64, dir string) { 311 c := cache{epoch: block / epochLength} 312 c.generate(dir, math.MaxInt32, false) 313 c.release() 314 } 315 316 // MakeDataset generates a new ethash dataset and optionally stores it to disk. 317 func MakeDataset(block uint64, dir string) { 318 d := dataset{epoch: block / epochLength} 319 d.generate(dir, math.MaxInt32, false) 320 d.release() 321 } 322 323 // Mode defines the type and amount of PoW verification an ethash engine makes. 324 type Mode uint 325 326 const ( 327 ModeNormal Mode = iota 328 ModeShared 329 ModeTest 330 ModeFake 331 ModeFullFake 332 ) 333 334 // Config are the configuration parameters of the ethash. 
type Config struct {
	CacheDir       string // Directory to store verification caches in ("" keeps them in memory only)
	CachesInMem    int    // Number of recent caches to keep resident in memory
	CachesOnDisk   int    // Number of recent cache files to keep on disk
	DatasetDir     string // Directory to store mining datasets in ("" keeps them in memory only)
	DatasetsInMem  int    // Number of recent datasets to keep resident in memory
	DatasetsOnDisk int    // Number of recent DAG files to keep on disk
	PowMode        Mode   // Type and amount of PoW verification to perform
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config // Engine configuration (cache/dataset storage, PoW mode)

	caches   map[uint64]*cache   // In memory caches to avoid regenerating too often
	fcache   *cache              // Pre-generated cache for the estimated future epoch
	datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
	fdataset *dataset            // Pre-generated dataset for the estimated future epoch

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}

// New creates a full sized ethash PoW scheme.
370 func New(config Config) *Ethash { 371 if config.CachesInMem <= 0 { 372 log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) 373 config.CachesInMem = 1 374 } 375 if config.CacheDir != "" && config.CachesOnDisk > 0 { 376 log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) 377 } 378 if config.DatasetDir != "" && config.DatasetsOnDisk > 0 { 379 log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk) 380 } 381 return &Ethash{ 382 config: config, 383 caches: make(map[uint64]*cache), 384 datasets: make(map[uint64]*dataset), 385 update: make(chan struct{}), 386 hashrate: metrics.NewMeter(), 387 } 388 } 389 390 // NewTester creates a small sized ethash PoW scheme useful only for testing 391 // purposes. 392 func NewTester() *Ethash { 393 return &Ethash{ 394 config: Config{ 395 CachesInMem: 1, 396 PowMode: ModeTest, 397 }, 398 caches: make(map[uint64]*cache), 399 datasets: make(map[uint64]*dataset), 400 update: make(chan struct{}), 401 hashrate: metrics.NewMeter(), 402 } 403 } 404 405 // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts 406 // all blocks' seal as valid, though they still have to conform to the Ethereum 407 // consensus rules. 408 func NewFaker() *Ethash { 409 return &Ethash{ 410 config: Config{ 411 PowMode: ModeFake, 412 }, 413 } 414 } 415 416 // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that 417 // accepts all blocks as valid apart from the single one specified, though they 418 // still have to conform to the Ethereum consensus rules. 
419 func NewFakeFailer(fail uint64) *Ethash { 420 return &Ethash{ 421 config: Config{ 422 PowMode: ModeFake, 423 }, 424 fakeFail: fail, 425 } 426 } 427 428 // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that 429 // accepts all blocks as valid, but delays verifications by some time, though 430 // they still have to conform to the Ethereum consensus rules. 431 func NewFakeDelayer(delay time.Duration) *Ethash { 432 return &Ethash{ 433 config: Config{ 434 PowMode: ModeFake, 435 }, 436 fakeDelay: delay, 437 } 438 } 439 440 // NewFullFaker creates an ethash consensus engine with a full fake scheme that 441 // accepts all blocks as valid, without checking any consensus rules whatsoever. 442 func NewFullFaker() *Ethash { 443 return &Ethash{ 444 config: Config{ 445 PowMode: ModeFullFake, 446 }, 447 } 448 } 449 450 // NewShared creates a full sized ethash PoW shared between all requesters running 451 // in the same process. 452 func NewShared() *Ethash { 453 return &Ethash{shared: sharedEthash} 454 } 455 456 // cache tries to retrieve a verification cache for the specified block number 457 // by first checking against a list of in-memory caches, then against caches 458 // stored on disk, and finally generating one if none can be found. 
func (ethash *Ethash) cache(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.caches[epoch], (*cache)(nil)
	if current == nil {
		// No in-memory cache, evict the oldest if the cache limit was reached
		for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.config.CachesInMem {
			// Find the least recently used cache and drop it.
			var evict *cache
			for _, cache := range ethash.caches {
				if evict == nil || evict.used.After(cache.used) {
					evict = cache
				}
			}
			delete(ethash.caches, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new cache pre-generated, use that, otherwise create a new one
		if ethash.fcache != nil && ethash.fcache.epoch == epoch {
			log.Trace("Using pre-generated cache", "epoch", epoch)
			current, ethash.fcache = ethash.fcache, nil
		} else {
			log.Trace("Requiring new ethash cache", "epoch", epoch)
			current = &cache{epoch: epoch}
		}
		ethash.caches[epoch] = current

		// If we just used up the future cache, or need a refresh, regenerate
		if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
			if ethash.fcache != nil {
				ethash.fcache.release()
			}
			log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
			future = &cache{epoch: epoch + 1}
			ethash.fcache = future
		}
		// New current cache, set its initial timestamp
		current.used = time.Now()
	}
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the cache
	// (generate is idempotent via sync.Once; the engine lock is deliberately
	// NOT held during the potentially slow generation).
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future cache, now's a good time to regenerate it
	// asynchronously so the next epoch switch doesn't stall.
	if future != nil {
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
	}
	return current.cache
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.datasets[epoch], (*dataset)(nil)
	if current == nil {
		// No in-memory dataset, evict the oldest if the dataset limit was reached
		for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.config.DatasetsInMem {
			// Find the least recently used dataset and drop it.
			var evict *dataset
			for _, dataset := range ethash.datasets {
				if evict == nil || evict.used.After(dataset.used) {
					evict = dataset
				}
			}
			delete(ethash.datasets, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new cache pre-generated, use that, otherwise create a new one
		if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
			log.Trace("Using pre-generated dataset", "epoch", epoch)
			// NOTE(review): unlike cache(), a fresh struct is built here so the
			// dataset is re-loaded (from disk) rather than reusing fdataset.
			current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
			ethash.fdataset = nil
		} else {
			log.Trace("Requiring new ethash dataset", "epoch", epoch)
			current = &dataset{epoch: epoch}
		}
		ethash.datasets[epoch] = current

		// If we just used up the future dataset, or need a refresh, regenerate
		if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
			if ethash.fdataset != nil {
				ethash.fdataset.release()
			}
			log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
			future = &dataset{epoch: epoch + 1}
			ethash.fdataset = future
		}
		// New current dataset, set its initial timestamp
		current.used = time.Now()
	}
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the cache
	// (generate is idempotent via sync.Once; the engine lock is not held here).
	current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future dataset, now's a good time to regenerate it
	// asynchronously so the next epoch switch doesn't stall.
	if future != nil {
		go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
	}
	return current.dataset
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	// (non-blocking send: no-op if no sealer is listening).
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (ethash *Ethash) Hashrate() float64 {
	return ethash.hashrate.Rate1()
}

// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
// that is empty.
620 func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API { 621 return nil 622 } 623 624 // SeedHash is the seed to use for generating a verification cache and the mining 625 // dataset. 626 func SeedHash(block uint64) []byte { 627 return seedHash(block) 628 }