github.com/quinndk/ethereum_read@v0.0.0-20181211143958-29c55eec3237/go-ethereum-master_read/consensus/ethash/ethash.go (about)

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"sync"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/hashicorp/golang-lru/simplelru"
)

var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256 (Exp treats a zero
	// modulus as "no modulus", so this is one past the largest 256-bit value).
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}
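
// Illustrative sketch (not part of the original file): how a caller of
// memoryMap might hold all three returned handles so the mapping can be torn
// down cleanly. The path is hypothetical; note the defers run LIFO, so the
// mapping is released before the file is closed.
//
//	file, mem, data, err := memoryMap("/tmp/ethash/cache-R23-0011223344556677")
//	if err != nil {
//		log.Error("Failed to mmap ethash cache", "err", err)
//		return
//	}
//	defer file.Close()
//	defer mem.Unmap()
//	_ = data[0] // data aliases the mapping; only valid until Unmap
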
// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path)
}

// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string
	new  func(epoch uint64) interface{}
	mu   sync.Mutex
	// Items are kept in a LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64
	futureItem interface{}
}

// newlru creates a new least-recently-used cache for either the verification
// caches or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted ethash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}
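
// Illustrative sketch (not part of the original file): the 'future item'
// behaviour of lru.get below, using a toy constructor that just returns the
// epoch number. Requesting epoch 5 returns the epoch-5 item and eagerly builds
// an epoch-6 future item; a repeat request hits the cache and builds nothing.
//
//	l := newlru("cache", 3, func(epoch uint64) interface{} { return epoch })
//	item, future := l.get(5) // item == uint64(5), future == uint64(6)
//	item, future = l.get(5)  // item cached, future == nil this time
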
// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}

// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}
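
// Illustrative sketch (not part of the original file): generating the epoch-0
// verification cache (~16 MB) into a hypothetical directory. On a
// little-endian machine the dump lands at <dir>/cache-R23-<first 8 seed bytes
// in hex>; keeping limit = 3 prunes caches more than 3 epochs old.
//
//	c := newCache(0).(*cache)
//	c.generate("/tmp/ethash", 3, false)
//	_ = c.cache // memory mapped uint32 words, past the dumpMagic header
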
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this dataset is relevant
	dump    *os.File  // File descriptor of the memory mapped dataset
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual dataset data content
	once    sync.Once // Ensures the dataset is generated only once
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		// Compute the cache and dataset (DAG) sizes for this epoch
		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return: build the
		// cache first, then derive the dataset from it.
		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// dataset becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the DAG file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			// Fall back to an in-memory dataset (dsize is in bytes, so /4 for uint32s)
			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer closes any file handlers and memory maps open.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false)
}
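
// Illustrative sketch (not part of the original file): pre-building the
// verification cache for block 30000 (the first block of epoch 1, since
// epochLength is 30000) from outside the package; the directory is
// hypothetical. This mirrors what a CLI "makecache" style tool would do.
//
//	ethash.MakeCache(30000, "/tmp/ethash")
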
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false)
}

// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir       string
	CachesInMem    int
	CachesOnDisk   int
	DatasetDir     string
	DatasetsInMem  int
	DatasetsOnDisk int
	PowMode        Mode
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config // Ethash configuration parameters

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}

// New creates a full sized ethash PoW scheme.
func New(config Config) *Ethash {
	if config.CachesInMem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	return &Ethash{
		config:   config,
		caches:   newlru("cache", config.CachesInMem, newCache),
		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeter(),
	}
}

// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester() *Ethash {
	return New(Config{CachesInMem: 1, PowMode: ModeTest})
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
		},
	}
}
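
// Illustrative sketch (not part of the original file): constructing engines
// for different purposes. The directory and counts below are example values,
// not recommendations.
//
//	engine := ethash.New(ethash.Config{
//		CacheDir:     "/tmp/ethash", // hypothetical path
//		CachesInMem:  2,
//		CachesOnDisk: 3,
//		PowMode:      ethash.ModeNormal,
//	})
//	tester := ethash.NewTester() // tiny caches/datasets via ModeTest
//	faker := ethash.NewFaker()   // seals accepted without real PoW
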
// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
		},
		fakeFail: fail,
	}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
		},
		fakeDelay: delay,
	}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFullFake,
		},
	}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
	epoch := block / epochLength
	currentI, futureI := ethash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation finish.
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
	}
	return current
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) *dataset {
	// Figure out the epoch this block belongs to
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// Wait for generation finish: an existing DAG is simply loaded from disk,
	// only a missing one is actually built (see generate above).
	current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

	// If we need a new future dataset, now's a good time to regenerate it,
	// pre-building the next epoch's DAG to smooth over the transition.
	if futureI != nil {
		future := futureI.(*dataset)
		go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
	}

	return current
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}
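
// Illustrative sketch (not part of the original file): the epoch arithmetic
// used by cache and dataset above, with epochLength = 30000 as defined by the
// ethash spec. Blocks 0..29999 share epoch 0's cache and DAG, blocks
// 30000..59999 share epoch 1's, and so on.
//
//	epoch := uint64(31415926) / epochLength // = 1047
//	_ = epoch
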
// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (ethash *Ethash) Hashrate() float64 {
	return ethash.hashrate.Rate1()
}

// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
// that is empty.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
	return nil
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}
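
// Illustrative sketch (not part of the original file): a miner front-end
// adjusting the thread count and sampling the hashrate, where engine is an
// *Ethash from New above.
//
//	engine.SetThreads(runtime.NumCPU()) // explicit; 0 also means "all cores"
//	fmt.Println("hashrate", engine.Hashrate()) // one-minute moving rate
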