github.com/aquanetwork/aquachain@v1.7.8/consensus/aquahash/aquahash.go

// Copyright 2017 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

// Package aquahash implements the aquahash proof-of-work consensus engine.
package aquahash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"sync"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	"github.com/hashicorp/golang-lru/simplelru"
	"gitlab.com/aquachain/aquachain/common/log"
	"gitlab.com/aquachain/aquachain/common/metrics"
	"gitlab.com/aquachain/aquachain/consensus"
	"gitlab.com/aquachain/aquachain/rpc"
)

var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedAquahash is a full instance that can be shared between multiple users.
	sharedAquahash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, 0})

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 2

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}
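// The dump files that memoryMap consumes begin with the two dumpMagic words,
// followed by the raw uint32 payload, all in host byte order. As an
// illustrative sketch (not part of this package), a plain reader could
// validate a dump on a little-endian host as shown below; checkDump and the
// use of encoding/binary are assumptions for the example only, since the
// real code path always goes through mmap:
//
//	func checkDump(path string) error {
//		f, err := os.Open(path)
//		if err != nil {
//			return err
//		}
//		defer f.Close()
//		var magic [2]uint32
//		if err := binary.Read(f, binary.LittleEndian, &magic); err != nil {
//			return err
//		}
//		if magic[0] != 0xbaddcafe || magic[1] != 0xfee1dead {
//			return ErrInvalidDumpMagic
//		}
//		return nil
//	}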
// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path)
}

// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string
	new  func(epoch uint64) interface{}
	mu   sync.Mutex
	// Items are kept in a LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64
	futureItem interface{}
}

// newlru creates a new least-recently-used cache for either the verification
// caches or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted aquahash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}
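// Illustrative usage sketch (not part of the original file): the engine
// further below builds one lru for verification caches and one for mining
// datasets, e.g.
//
//	caches := newlru("cache", 3, newCache)
//	item, future := caches.get(42)
//
// where get (defined next) returns the epoch-42 item and, the first time
// epoch 42 becomes the highest epoch seen, a freshly created epoch-43
// "future item" that the caller can warm up in the background.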
// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new aquahash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future aquahash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}

// cache wraps an aquahash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new aquahash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength+1, 0)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old aquahash cache from disk")
			return
		}
		logger.Debug("Failed to load old aquahash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped aquahash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength+1, 0)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}
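// Note on the cleanup loop in generate above: it unlinks every epoch at or
// below c.epoch-limit, so with limit = CachesOnDisk only the most recent
// cache files survive on disk. Illustrative arithmetic (hypothetical
// numbers, shown as a sketch only):
//
//	// c.epoch = 10, limit = 2
//	// removed: cache files for epochs 8, 7, 6, ..., 0
//	// kept:    epochs 9 and 10 (and 11, once the future cache is generated)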
// dataset wraps an aquahash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this dataset is relevant
	dump    *os.File  // File descriptor of the memory mapped dataset
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual dataset content (may be memory mapped)
	once    sync.Once // Ensures the dataset is generated only once
}

// newDataset creates a new aquahash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength+1, 0)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return
		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// dataset becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old aquahash dataset from disk")
			return
		}
		logger.Debug("Failed to load old aquahash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped aquahash dataset", "err", err)

			// dsize is in bytes and each item is a uint32, so allocate dsize/4
			// entries, matching the in-memory and mmap generation paths above.
			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength+1, 0)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer closes any open file handles and memory maps.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}

// MakeCache generates a new aquahash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false)
}

// MakeDataset generates a new aquahash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false)
}
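// Illustrative usage (a sketch, not part of this file): pre-generating the
// verification cache and mining DAG for a block height, say from a CLI tool;
// the block number and directory are example values only:
//
//	MakeCache(1000000, "/var/lib/aquahash")   // writes cache-R2-<seedhex>
//	MakeDataset(1000000, "/var/lib/aquahash") // writes full-R2-<seedhex>
//
// Passing dir == "" keeps the result purely in memory and discards it once
// the value goes out of scope.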
// Mode defines the type and amount of PoW verification an aquahash engine makes.
type Mode uint

const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// Config are the configuration parameters of the aquahash.
type Config struct {
	CacheDir       string
	CachesInMem    int
	CachesOnDisk   int
	DatasetDir     string
	DatasetsInMem  int
	DatasetsOnDisk int
	PowMode        Mode
	StartVersion   byte
}

// Aquahash is a consensus engine based on proof-of-work implementing the aquahash
// algorithm.
type Aquahash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	shared    *Aquahash     // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}

// New creates a full sized aquahash PoW scheme.
func New(config Config) *Aquahash {
	if config.StartVersion > 1 {
		log.Info("Starting new Aquahash engine", "startVersion", config.StartVersion)
		return &Aquahash{
			config:   config,
			update:   make(chan struct{}),
			hashrate: metrics.NewMeter(),
		}
	}
	if config.CachesInMem <= 0 {
		log.Warn("One aquahash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		log.Info("Disk storage enabled for aquahash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		log.Info("Disk storage enabled for aquahash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	return &Aquahash{
		config:   config,
		caches:   newlru("cache", config.CachesInMem, newCache),
		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeter(),
	}
}

// NewTester creates a small sized aquahash PoW scheme useful only for testing
// purposes.
func NewTester() *Aquahash {
	return New(Config{CachesInMem: 1, PowMode: ModeTest})
}

// NewFaker creates an aquahash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the AquaChain
// consensus rules.
func NewFaker() *Aquahash {
	return &Aquahash{
		config: Config{
			PowMode: ModeFake,
		},
	}
}
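// A quick sketch of how these constructors are typically chosen (illustrative
// only; the config values are example numbers):
//
//	engine := New(Config{CachesInMem: 3, CacheDir: "/tmp/aquahash"}) // full PoW engine
//	engine = NewTester()    // real PoW over tiny test-sized caches
//	engine = NewFaker()     // seals accepted, headers still checked against consensus rules
//	engine = NewFullFaker() // everything accepted, no checks whatsoever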
// NewFakeFailer creates an aquahash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the AquaChain consensus rules.
func NewFakeFailer(fail uint64) *Aquahash {
	return &Aquahash{
		config: Config{
			PowMode: ModeFake,
		},
		fakeFail: fail,
	}
}

// NewFakeDelayer creates an aquahash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the AquaChain consensus rules.
func NewFakeDelayer(delay time.Duration) *Aquahash {
	return &Aquahash{
		config: Config{
			PowMode: ModeFake,
		},
		fakeDelay: delay,
	}
}

// NewFullFaker creates an aquahash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Aquahash {
	return &Aquahash{
		config: Config{
			PowMode: ModeFullFake,
		},
	}
}

// NewShared creates a full sized aquahash PoW shared between all requesters running
// in the same process.
func NewShared() *Aquahash {
	return &Aquahash{shared: sharedAquahash}
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (aquahash *Aquahash) cache(block uint64) *cache {
	if aquahash.config.StartVersion > 1 {
		return nil
	}
	epoch := block / epochLength
	currentI, futureI := aquahash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation finish.
	current.generate(aquahash.config.CacheDir, aquahash.config.CachesOnDisk, aquahash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(aquahash.config.CacheDir, aquahash.config.CachesOnDisk, aquahash.config.PowMode == ModeTest)
	}
	return current
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (aquahash *Aquahash) dataset(block uint64) *dataset {
	epoch := block / epochLength
	currentI, futureI := aquahash.datasets.get(epoch)
	current := currentI.(*dataset)

	// Wait for generation finish.
	current.generate(aquahash.config.DatasetDir, aquahash.config.DatasetsOnDisk, aquahash.config.PowMode == ModeTest)

	// If we need a new future dataset, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*dataset)
		go future.generate(aquahash.config.DatasetDir, aquahash.config.DatasetsOnDisk, aquahash.config.PowMode == ModeTest)
	}

	return current
}

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (aquahash *Aquahash) Threads() int {
	aquahash.lock.Lock()
	defer aquahash.lock.Unlock()

	return aquahash.threads
}
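// Thread-count semantics, as an illustrative usage sketch for the method
// below (engine is any *Aquahash):
//
//	engine.SetThreads(0)  // zero: the miner uses all cores of the machine
//	engine.SetThreads(4)  // positive: mine on exactly four threads
//	engine.SetThreads(-1) // negative: allowed, the miner idles doing no work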
// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (aquahash *Aquahash) SetThreads(threads int) {
	aquahash.lock.Lock()
	defer aquahash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if aquahash.shared != nil {
		aquahash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	aquahash.threads = threads
	select {
	case aquahash.update <- struct{}{}:
	default:
	}
}

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (aquahash *Aquahash) Hashrate() float64 {
	return aquahash.hashrate.Rate1()
}

// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
// that is empty.
func (aquahash *Aquahash) APIs(chain consensus.ChainReader) []rpc.API {
	return nil
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64, version byte) []byte {
	return seedHash(block, version)
}
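// Usage note (an illustrative sketch, not part of this file): external tools
// can reproduce the per-epoch file names by pairing SeedHash with the epoch
// arithmetic used throughout this file; version 0 is what the generate
// methods above pass to seedHash, and name is a hypothetical variable:
//
//	seed := SeedHash(epoch*epochLength+1, 0)
//	name := fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed[:8])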