github.com/tuotoo/go-ethereum@v1.7.4-0.20171121184211-049797d40a24/consensus/ethash/ethash.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"sync"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rpc"
	metrics "github.com/rcrowley/go-metrics"
)

var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256.
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New("", 3, 0, "", 1, 0)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian reports whether the local system is running in little-endian
// byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], nil
}

// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
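// The slice-header rewrite above is the core trick of memoryMapFile: the
// mmap's byte slice is re-viewed as a []uint32 without copying. A minimal
// standalone sketch of the same idea (illustrative only, not part of this
// package's API):
//
//	func bytesAsUint32s(b []byte) []uint32 {
//		header := *(*reflect.SliceHeader)(unsafe.Pointer(&b))
//		header.Len /= 4 // 4 bytes per uint32
//		header.Cap /= 4
//		return *(*[]uint32)(unsafe.Pointer(&header))
//	}
//
// The resulting slice aliases the original bytes and assumes the platform's
// native byte order, which is why big-endian hosts get their own ".be"
// suffixed dump files below.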
// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path)
}
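// The dump files written by memoryMapAndGenerate therefore have a trivial
// layout: the two magic words, followed by the raw uint32 payload in native
// byte order. A sketch of the layout (offsets in bytes):
//
//	offset 0: dumpMagic[0] (0xbaddcafe, 4 bytes)
//	offset 4: dumpMagic[1] (0xfee1dead, 4 bytes)
//	offset 8: generator output (size bytes)
//
// memoryMap checks the magic words on load and returns ErrInvalidDumpMagic on
// mismatch, so a truncated or foreign file is rejected before use.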
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64 // Epoch for which this cache is relevant

	dump *os.File  // File descriptor of the memory mapped cache
	mmap mmap.MMap // Memory map itself to unmap before releasing

	cache []uint32   // The actual cache data content (may be memory mapped)
	used  time.Time  // Timestamp of the last use for smarter eviction
	once  sync.Once  // Ensures the cache is generated only once
	lock  sync.Mutex // Ensures thread safety for updating the usage time
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		// If we have a testing cache, generate and return
		if test {
			c.cache = make([]uint32, 1024/4)
			generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
			return
		}
		// If we don't store anything on disk, generate and return
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)

		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// release closes any open file handles and memory maps.
func (c *cache) release() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.mmap = nil
	}
	if c.dump != nil {
		c.dump.Close()
		c.dump = nil
	}
}
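// For reference, the cache file that generate produces for a given block can
// be predicted from the epoch alone. A hedged sketch of the naming scheme used
// above (the variable names are illustrative):
//
//	epoch := block / epochLength
//	seed := seedHash(epoch*epochLength + 1)
//	name := fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed[:8]) // plus ".be" on big-endian hosts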
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch uint64 // Epoch for which this dataset is relevant

	dump *os.File  // File descriptor of the memory mapped dataset
	mmap mmap.MMap // Memory map itself to unmap before releasing

	dataset []uint32   // The actual dataset content (may be memory mapped)
	used    time.Time  // Timestamp of the last use for smarter eviction
	once    sync.Once  // Ensures the dataset is generated only once
	lock    sync.Mutex // Ensures thread safety for updating the usage time
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		// If we have a testing dataset, generate and return
		if test {
			cache := make([]uint32, 1024/4)
			generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))

			d.dataset = make([]uint32, 32*1024/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// If we don't store anything on disk, generate and return
		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)

		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// release closes any open file handles and memory maps.
func (d *dataset) release() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.mmap = nil
	}
	if d.dump != nil {
		d.dump.Close()
		d.dump = nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false)
	c.release()
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false)
	d.release()
}
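// A hypothetical caller can use the two helpers above to pre-generate the
// verification cache and full DAG for a block ahead of time; the directory
// path below is illustrative only:
//
//	ethash.MakeCache(1000000, "/var/lib/ethash")
//	ethash.MakeDataset(1000000, "/var/lib/ethash")
//
// Passing math.MaxInt32 as the retention limit internally means these helpers
// never delete previously generated files.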
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	cachedir     string // Data directory to store the verification caches
	cachesinmem  int    // Number of caches to keep in memory
	cachesondisk int    // Number of caches to keep on disk
	dagdir       string // Data directory to store full mining datasets
	dagsinmem    int    // Number of mining datasets to keep in memory
	dagsondisk   int    // Number of mining datasets to keep on disk

	caches   map[uint64]*cache   // In memory caches to avoid regenerating too often
	fcache   *cache              // Pre-generated cache for the estimated future epoch
	datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
	fdataset *dataset            // Pre-generated dataset for the estimated future epoch

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	tester    bool          // Flag whether to use a smaller test dataset
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeMode  bool          // Flag whether to disable PoW checking
	fakeFull  bool          // Flag whether to disable all consensus rules
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}

// New creates a full sized ethash PoW scheme.
func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
	if cachesinmem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
		cachesinmem = 1
	}
	if cachedir != "" && cachesondisk > 0 {
		log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
	}
	if dagdir != "" && dagsondisk > 0 {
		log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
	}
	return &Ethash{
		cachedir:     cachedir,
		cachesinmem:  cachesinmem,
		cachesondisk: cachesondisk,
		dagdir:       dagdir,
		dagsinmem:    dagsinmem,
		dagsondisk:   dagsondisk,
		caches:       make(map[uint64]*cache),
		datasets:     make(map[uint64]*dataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeter(),
	}
}

// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester() *Ethash {
	return &Ethash{
		cachesinmem: 1,
		caches:      make(map[uint64]*cache),
		datasets:    make(map[uint64]*dataset),
		tester:      true,
		update:      make(chan struct{}),
		hashrate:    metrics.NewMeter(),
	}
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
	return &Ethash{fakeMode: true}
}
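// A hedged construction sketch for the full engine, as seen from a client
// package (directory paths and counts are illustrative only):
//
//	engine := ethash.New(
//		"/var/lib/ethash-cache", 3, 2, // cache dir, caches in memory, caches on disk
//		"/var/lib/ethash-dag", 1, 2, // DAG dir, DAGs in memory, DAGs on disk
//	)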
// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{fakeMode: true, fakeFail: fail}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{fakeMode: true, fakeDelay: delay}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{fakeMode: true, fakeFull: true}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.caches[epoch], (*cache)(nil)
	if current == nil {
		// No in-memory cache, evict the oldest if the cache limit was reached
		for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
			var evict *cache
			for _, cache := range ethash.caches {
				if evict == nil || evict.used.After(cache.used) {
					evict = cache
				}
			}
			delete(ethash.caches, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new cache pre-generated, use that, otherwise create a new one
		if ethash.fcache != nil && ethash.fcache.epoch == epoch {
			log.Trace("Using pre-generated cache", "epoch", epoch)
			current, ethash.fcache = ethash.fcache, nil
		} else {
			log.Trace("Requiring new ethash cache", "epoch", epoch)
			current = &cache{epoch: epoch}
		}
		ethash.caches[epoch] = current

		// If we just used up the future cache, or need a refresh, regenerate
		if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
			if ethash.fcache != nil {
				ethash.fcache.release()
			}
			log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
			future = &cache{epoch: epoch + 1}
			ethash.fcache = future
		}
		// New current cache, set its initial timestamp
		current.used = time.Now()
	}
	ethash.lock.Unlock()

	// Wait for generation to finish, bump the timestamp and finalize the cache
	current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future cache, now's a good time to regenerate it
	if future != nil {
		go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
	}
	return current.cache
}
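// Epochs advance every epochLength blocks (30000 in this package), so the
// pre-generated future cache created above becomes the current one exactly at
// the next epoch boundary. Illustrative arithmetic with hypothetical values:
//
//	block := uint64(59999)
//	epoch := block / epochLength      // 1
//	next := (epoch + 1) * epochLength // 60000: first block served by fcache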
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.datasets[epoch], (*dataset)(nil)
	if current == nil {
		// No in-memory dataset, evict the oldest if the dataset limit was reached
		for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
			var evict *dataset
			for _, dataset := range ethash.datasets {
				if evict == nil || evict.used.After(dataset.used) {
					evict = dataset
				}
			}
			delete(ethash.datasets, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new dataset pre-generated, use that, otherwise create a new one
		if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
			log.Trace("Using pre-generated dataset", "epoch", epoch)
			current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
			ethash.fdataset = nil
		} else {
			log.Trace("Requiring new ethash dataset", "epoch", epoch)
			current = &dataset{epoch: epoch}
		}
		ethash.datasets[epoch] = current

		// If we just used up the future dataset, or need a refresh, regenerate
		if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
			if ethash.fdataset != nil {
				ethash.fdataset.release()
			}
			log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
			future = &dataset{epoch: epoch + 1}
			ethash.fdataset = future
		}
		// New current dataset, set its initial timestamp
		current.used = time.Now()
	}
	ethash.lock.Unlock()

	// Wait for generation to finish, bump the timestamp and finalize the dataset
	current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future dataset, now's a good time to regenerate it
	if future != nil {
		go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
	}
	return current.dataset
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, it only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (ethash *Ethash) Hashrate() float64 {
	return ethash.hashrate.Rate1()
}
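// A hedged usage sketch for the mining knobs above (the engine value is
// assumed to come from New or NewTester):
//
//	engine.SetThreads(0)  // mine on all available cores
//	engine.SetThreads(-1) // idle the miner without stopping it
//	fmt.Println(engine.Threads(), engine.Hashrate())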
// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
// that is empty.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
	return nil
}

// SeedHash returns the seed to use for generating a verification cache and the
// mining dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}
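// A small illustrative call: every block inside the same epoch shares one
// seed, so with the 30000-block epoch length the seed for epoch 5 can be
// obtained from its first block:
//
//	seed := ethash.SeedHash(150000)
//	fmt.Printf("%x\n", seed)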