github.com/lbryio/lbcd@v0.22.119/fees/estimator.go

// Copyright (c) 2018-2020 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package fees

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"sort"
	"sync"

	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcutil"
	"github.com/syndtr/goleveldb/leveldb"
	ldbutil "github.com/syndtr/goleveldb/leveldb/util"
)

const (
	// DefaultMaxBucketFeeMultiplier is the default multiplier used to find the
	// largest fee bucket, starting at the minimum fee.
	DefaultMaxBucketFeeMultiplier int = 100

	// DefaultMaxConfirmations is the default number of confirmation ranges to
	// track in the estimator.
	DefaultMaxConfirmations uint32 = 42

	// DefaultFeeRateStep is the default multiplier between two consecutive fee
	// rate buckets.
	DefaultFeeRateStep float64 = 1.05

	// defaultDecay is the default value used to decay old transactions from the
	// estimator.
	defaultDecay float64 = 0.998

	// maxAllowedBucketFees is an upper bound of how many bucket fees can be
	// used in the estimator. This is verified during estimator initialization
	// and database loading.
	maxAllowedBucketFees = 2000

	// maxAllowedConfirms is an upper bound of how many confirmation ranges can
	// be used in the estimator. This is verified during estimator
	// initialization and database loading.
	maxAllowedConfirms = 788
)

var (
	// ErrNoSuccessPctBucketFound is the error returned when no bucket has been
	// found with the minimum required percentage success.
	ErrNoSuccessPctBucketFound = errors.New("no bucket with the minimum " +
		"required success percentage found")

	// ErrNotEnoughTxsForEstimate is the error returned when not enough
	// transactions have been seen by the fee generator to give an estimate.
	ErrNotEnoughTxsForEstimate = errors.New("not enough transactions seen for " +
		"estimation")

	dbByteOrder = binary.BigEndian

	dbKeyVersion      = []byte("version")
	dbKeyBucketFees   = []byte("bucketFeeBounds")
	dbKeyMaxConfirms  = []byte("maxConfirms")
	dbKeyBestHeight   = []byte("bestHeight")
	dbKeyBucketPrefix = []byte{0x01, 0x70, 0x1d, 0x00}
)

// ErrTargetConfTooLarge is the type of error returned when a user of the
// estimator requests a confirmation range higher than tracked by the estimator.
type ErrTargetConfTooLarge struct {
	MaxConfirms int32
	ReqConfirms int32
}

func (e ErrTargetConfTooLarge) Error() string {
	return fmt.Sprintf("target confirmation requested (%d) higher than "+
		"maximum confirmation range tracked by estimator (%d)", e.ReqConfirms,
		e.MaxConfirms)
}

type feeRate float64

type txConfirmStatBucketCount struct {
	txCount float64
	feeSum  float64
}

type txConfirmStatBucket struct {
	confirmed    []txConfirmStatBucketCount
	confirmCount float64
	feeSum       float64
}

// EstimatorConfig stores the configuration parameters for a given fee
// estimator. It is used to initialize an empty fee estimator.
type EstimatorConfig struct {
	// MaxConfirms is the maximum number of confirmation ranges to check.
	MaxConfirms uint32

	// MinBucketFee is the value of the fee rate of the lowest bucket for which
	// estimation is tracked.
	MinBucketFee lbcutil.Amount

	// MaxBucketFee is the value of the fee for the highest bucket for which
	// estimation is tracked.
	//
	// It MUST be higher than MinBucketFee.
	MaxBucketFee lbcutil.Amount

	// ExtraBucketFee is an additional bucket fee rate to include in the
	// database for tracking transactions. Specifying this can be useful when
	// the default relay fee of the network is undergoing change (due to a new
	// release of the software for example), so that the older fee can be
	// tracked exactly.
	//
	// It MUST have a value between MinBucketFee and MaxBucketFee, otherwise
	// it's ignored.
	ExtraBucketFee lbcutil.Amount

	// FeeRateStep is the multiplier to generate the fee rate buckets (each
	// bucket is higher than the previous one by this factor).
	//
	// It MUST have a value > 1.0.
	FeeRateStep float64

	// DatabaseFile is the location of the estimator database file. If empty,
	// updates to the estimator state are not backed by the filesystem.
	DatabaseFile string

	// ReplaceBucketsOnLoad indicates whether to replace the buckets in the
	// current estimator by those stored in the feesdb file instead of
	// validating that they are both using the same set of fees.
	ReplaceBucketsOnLoad bool
}

// memPoolTxDesc is an aux structure used to track the local estimator mempool.
type memPoolTxDesc struct {
	addedHeight int32
	bucketIndex int32
	fees        feeRate
}

// Estimator tracks historical data for published and mined transactions in
// order to estimate fees to be used in new transactions for confirmation
// within a target block window.
type Estimator struct {
	// bucketFeeBounds are the upper bounds for each individual fee bucket.
	bucketFeeBounds []feeRate

	// buckets are the confirmed tx count and fee sum by bucket fee.
	buckets []txConfirmStatBucket

	// memPool are the mempool transaction count and fee sum by bucket fee.
	memPool []txConfirmStatBucket

	// memPoolTxs is the map of transaction hashes and data of known mempool txs.
	memPoolTxs map[chainhash.Hash]memPoolTxDesc

	maxConfirms int32
	decay       float64
	bestHeight  int32
	db          *leveldb.DB
	lock        sync.RWMutex
}

// NewEstimator returns an empty estimator given a config. This estimator
// then needs to be fed data for published and mined transactions before it can
// be used to estimate fees for new transactions.
func NewEstimator(cfg *EstimatorConfig) (*Estimator, error) {
	// Sanity check the config.
	if cfg.MaxBucketFee <= cfg.MinBucketFee {
		return nil, errors.New("maximum bucket fee should not be lower than " +
			"minimum bucket fee")
	}
	if cfg.FeeRateStep <= 1.0 {
		return nil, errors.New("fee rate step should not be <= 1.0")
	}
	if cfg.MinBucketFee <= 0 {
		return nil, errors.New("minimum bucket fee rate cannot be <= 0")
	}
	if cfg.MaxConfirms > maxAllowedConfirms {
		return nil, fmt.Errorf("confirmation count requested (%d) larger than "+
			"maximum allowed (%d)", cfg.MaxConfirms, maxAllowedConfirms)
	}

	decay := defaultDecay
	maxConfirms := cfg.MaxConfirms
	max := float64(cfg.MaxBucketFee)
	var bucketFees []feeRate
	prevF := 0.0
	extraBucketFee := float64(cfg.ExtraBucketFee)
	for f := float64(cfg.MinBucketFee); f < max; f *= cfg.FeeRateStep {
		if (f > extraBucketFee) && (prevF < extraBucketFee) {
			// Add the extra bucket fee for tracking.
			bucketFees = append(bucketFees, feeRate(extraBucketFee))
		}
		bucketFees = append(bucketFees, feeRate(f))
		prevF = f
	}

	// The last bucket catches everything else, so it uses an upper bound of
	// +inf which any rate must be lower than.
	bucketFees = append(bucketFees, feeRate(math.Inf(1)))

	nbBuckets := len(bucketFees)
	res := &Estimator{
		bucketFeeBounds: bucketFees,
		buckets:         make([]txConfirmStatBucket, nbBuckets),
		memPool:         make([]txConfirmStatBucket, nbBuckets),
		maxConfirms:     int32(maxConfirms),
		decay:           decay,
		memPoolTxs:      make(map[chainhash.Hash]memPoolTxDesc),
		bestHeight:      -1,
	}

	for i := range bucketFees {
		res.buckets[i] = txConfirmStatBucket{
			confirmed: make([]txConfirmStatBucketCount, maxConfirms),
		}
		res.memPool[i] = txConfirmStatBucket{
			confirmed: make([]txConfirmStatBucketCount, maxConfirms),
		}
	}

	if cfg.DatabaseFile != "" {
		db, err := leveldb.OpenFile(cfg.DatabaseFile, nil)
		if err != nil {
			return nil, fmt.Errorf("error opening estimator database: %v", err)
		}
		res.db = db

		err = res.loadFromDatabase(cfg.ReplaceBucketsOnLoad)
		if err != nil {
			return nil, fmt.Errorf("error loading estimator data from db: %v",
				err)
		}
	}

	return res, nil
}
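
// The following is an illustrative sketch (added commentary, not part of the
// original file) of how a caller might construct an estimator. The concrete
// fee values and the database path are assumptions for the example, not
// values mandated by this package:
//
//	cfg := EstimatorConfig{
//		MaxConfirms:  DefaultMaxConfirmations,
//		MinBucketFee: lbcutil.Amount(1000),
//		MaxBucketFee: lbcutil.Amount(1000 * int64(DefaultMaxBucketFeeMultiplier)),
//		FeeRateStep:  DefaultFeeRateStep,
//		DatabaseFile: "feesdb", // hypothetical path; leave empty for a memory-only estimator
//	}
//	estimator, err := NewEstimator(&cfg)
//	if err != nil {
//		// handle initialization error
//	}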

// DumpBuckets returns the internal estimator state as a string.
func (stats *Estimator) DumpBuckets() string {
	res := "          |"
	for c := 0; c < int(stats.maxConfirms); c++ {
		if c == int(stats.maxConfirms)-1 {
			res += fmt.Sprintf(" %15s", "+Inf")
		} else {
			res += fmt.Sprintf(" %15d|", c+1)
		}
	}
	res += "\n"

	l := len(stats.bucketFeeBounds)
	for i := 0; i < l; i++ {
		res += fmt.Sprintf("%10.8f", stats.bucketFeeBounds[i]/1e8)
		for c := 0; c < int(stats.maxConfirms); c++ {
			avg := float64(0)
			count := stats.buckets[i].confirmed[c].txCount
			if stats.buckets[i].confirmed[c].txCount > 0 {
				avg = stats.buckets[i].confirmed[c].feeSum /
					stats.buckets[i].confirmed[c].txCount / 1e8
			}

			res += fmt.Sprintf("| %.8f %6.1f", avg, count)
		}
		res += "\n"
	}

	return res
}

// loadFromDatabase loads the estimator data from the currently opened database
// and performs any db upgrades if required. After loading, it updates the db
// with the current estimator configuration.
//
// Argument replaceBuckets indicates if the buckets in the current stats should
// be completely replaced by what is stored in the database or if the data
// should be validated against what is currently in the estimator.
//
// The database should *not* be used while loading is taking place.
//
// The current code does not support loading from a database created with a
// different set of configuration parameters (fee rate buckets, max confirmation
// range, etc) than the current estimator is configured with. If an incompatible
// file is detected during loading, an error is returned and the user must
// either reconfigure the estimator to use the same parameters to allow the
// database to be loaded or they must ignore the database file (possibly by
// deleting it) so that the new parameters are used. In the future it might be
// possible to load from a different set of configuration parameters.
//
// The current code does not save mempool information, since saving
// information in the estimator without saving the corresponding data in the
// mempool itself could result in transactions lingering in the mempool
// estimator forever.
func (stats *Estimator) loadFromDatabase(replaceBuckets bool) error {
	if stats.db == nil {
		return errors.New("estimator database is not open")
	}

	// Database version is currently hardcoded here as this is the only
	// place that uses it.
	currentDbVersion := []byte{1}

	version, err := stats.db.Get(dbKeyVersion, nil)
	if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
		return fmt.Errorf("error reading version from db: %v", err)
	}
	if len(version) < 1 {
		// No data in the file. Fill with the current config.
		batch := new(leveldb.Batch)
		b := bytes.NewBuffer(nil)
		var maxConfirmsBytes [4]byte
		var bestHeightBytes [8]byte

		batch.Put(dbKeyVersion, currentDbVersion)

		dbByteOrder.PutUint32(maxConfirmsBytes[:], uint32(stats.maxConfirms))
		batch.Put(dbKeyMaxConfirms, maxConfirmsBytes[:])

		dbByteOrder.PutUint64(bestHeightBytes[:], uint64(stats.bestHeight))
		batch.Put(dbKeyBestHeight, bestHeightBytes[:])

		err = binary.Write(b, dbByteOrder, stats.bucketFeeBounds)
		if err != nil {
			return fmt.Errorf("error writing bucket fees to db: %v", err)
		}
		batch.Put(dbKeyBucketFees, b.Bytes())

		err = stats.db.Write(batch, nil)
		if err != nil {
			return fmt.Errorf("error writing initial estimator db file: %v",
				err)
		}

		err = stats.updateDatabase()
		if err != nil {
			return fmt.Errorf("error adding initial estimator data to db: %v",
				err)
		}

		log.Debug("Initialized fee estimator database")

		return nil
	}

	if !bytes.Equal(currentDbVersion, version) {
		return fmt.Errorf("incompatible database version: %d", version)
	}

	maxConfirmsBytes, err := stats.db.Get(dbKeyMaxConfirms, nil)
	if err != nil {
		return fmt.Errorf("error reading max confirmation range from db file: "+
			"%v", err)
	}
	if len(maxConfirmsBytes) != 4 {
		return errors.New("wrong number of bytes in stored maxConfirms")
	}
	fileMaxConfirms := int32(dbByteOrder.Uint32(maxConfirmsBytes))
	if fileMaxConfirms > maxAllowedConfirms {
		return fmt.Errorf("confirmation count stored in database (%d) larger "+
			"than maximum allowed (%d)", fileMaxConfirms, maxAllowedConfirms)
	}

	feesBytes, err := stats.db.Get(dbKeyBucketFees, nil)
	if err != nil {
		return fmt.Errorf("error reading fee bounds from db file: %v", err)
	}
	if feesBytes == nil {
		return errors.New("fee bounds not found in database file")
	}
	fileNbBucketFees := len(feesBytes) / 8
	if fileNbBucketFees > maxAllowedBucketFees {
		return fmt.Errorf("more fee buckets stored in file (%d) than allowed "+
			"(%d)", fileNbBucketFees, maxAllowedBucketFees)
	}
	fileBucketFees := make([]feeRate, fileNbBucketFees)
	err = binary.Read(bytes.NewReader(feesBytes), dbByteOrder,
		&fileBucketFees)
	if err != nil {
		return fmt.Errorf("error decoding file bucket fees: %v", err)
	}

	if !replaceBuckets {
		if stats.maxConfirms != fileMaxConfirms {
			return errors.New("max confirmation range in database file different " +
				"than currently configured max confirmation")
		}

		if len(stats.bucketFeeBounds) != len(fileBucketFees) {
			return errors.New("number of bucket fees stored in database file " +
				"different than currently configured bucket fees")
		}

		for i, f := range fileBucketFees {
			if stats.bucketFeeBounds[i] != f {
				return errors.New("bucket fee rates stored in database file " +
					"different than currently configured fees")
			}
		}
	}

	fileBuckets := make([]txConfirmStatBucket, fileNbBucketFees)

	iter := stats.db.NewIterator(ldbutil.BytesPrefix(dbKeyBucketPrefix), nil)
	err = nil
	var fbytes [8]byte
	for iter.Next() {
		key := iter.Key()
		if len(key) != 8 {
			err = fmt.Errorf("bucket key read from db has wrong length (%d)",
				len(key))
			break
		}
		idx := int(int32(dbByteOrder.Uint32(key[4:])))
		if (idx >= len(fileBuckets)) || (idx < 0) {
			err = fmt.Errorf("wrong bucket index read from db (%d vs %d)",
				idx, len(fileBuckets))
			break
		}
		value := iter.Value()
		if len(value) != 8+8+int(fileMaxConfirms)*16 {
			err = errors.New("wrong size of data in bucket read from db")
			break
		}

		b := bytes.NewBuffer(value)
		readf := func() float64 {
			// We ignore the error here because the only possible one is EOF and
			// we already previously checked the length of the source byte array
			// for consistency.
			b.Read(fbytes[:])
			return math.Float64frombits(dbByteOrder.Uint64(fbytes[:]))
		}

		fileBuckets[idx].confirmCount = readf()
		fileBuckets[idx].feeSum = readf()
		fileBuckets[idx].confirmed = make([]txConfirmStatBucketCount, fileMaxConfirms)
		for i := range fileBuckets[idx].confirmed {
			fileBuckets[idx].confirmed[i].txCount = readf()
			fileBuckets[idx].confirmed[i].feeSum = readf()
		}
	}
	iter.Release()
	if err != nil {
		return err
	}
	err = iter.Error()
	if err != nil {
		return fmt.Errorf("error on bucket iterator: %v", err)
	}

	stats.bucketFeeBounds = fileBucketFees
	stats.buckets = fileBuckets
	stats.maxConfirms = fileMaxConfirms
	log.Debug("Loaded fee estimator database")

	return nil
}

// updateDatabase updates the current database file with the current bucket
// data. This is called during normal operation after processing mined
// transactions, so it only updates data that might have changed.
func (stats *Estimator) updateDatabase() error {
	if stats.db == nil {
		return errors.New("estimator database is closed")
	}

	batch := new(leveldb.Batch)
	buf := bytes.NewBuffer(nil)

	var key [8]byte
	copy(key[:], dbKeyBucketPrefix)
	var fbytes [8]byte
	writef := func(f float64) {
		dbByteOrder.PutUint64(fbytes[:], math.Float64bits(f))
		_, err := buf.Write(fbytes[:])
		if err != nil {
			panic(err) // only possible error is ErrTooLarge
		}
	}

	for i, b := range stats.buckets {
		dbByteOrder.PutUint32(key[4:], uint32(i))
		buf.Reset()
		writef(b.confirmCount)
		writef(b.feeSum)
		for _, c := range b.confirmed {
			writef(c.txCount)
			writef(c.feeSum)
		}
		batch.Put(key[:], buf.Bytes())
	}

	var bestHeightBytes [8]byte

	dbByteOrder.PutUint64(bestHeightBytes[:], uint64(stats.bestHeight))
	batch.Put(dbKeyBestHeight, bestHeightBytes[:])

	err := stats.db.Write(batch, nil)
	if err != nil {
		return fmt.Errorf("error writing update to estimator db file: %v",
			err)
	}

	return nil
}
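
// For reference (added commentary, derived from updateDatabase and
// loadFromDatabase above): each per-bucket record is keyed by
// dbKeyBucketPrefix (4 bytes) followed by the big-endian uint32 bucket index,
// and its value is a sequence of big-endian float64 bit patterns:
//
//	confirmCount | feeSum | confirmed[0].txCount | confirmed[0].feeSum | ...
//
// so the value is always 8+8+maxConfirms*16 bytes long, which is exactly the
// length verified when the record is read back during loading.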

// lowerBucket returns the index of the bucket with the lowest upper bound that
// is not lower than rate, i.e. the bucket the given fee rate falls into.
func (stats *Estimator) lowerBucket(rate feeRate) int32 {
	res := sort.Search(len(stats.bucketFeeBounds), func(i int) bool {
		return stats.bucketFeeBounds[i] >= rate
	})
	return int32(res)
}

// confirmRange returns the confirmation range index to be used for the given
// number of blocks to confirm. The last confirmation range has an upper bound
// of +inf to mean that it represents all confirmations higher than the second
// to last bucket.
func (stats *Estimator) confirmRange(blocksToConfirm int32) int32 {
	idx := blocksToConfirm - 1
	if idx >= stats.maxConfirms {
		return stats.maxConfirms - 1
	}
	return idx
}

// updateMovingAverages updates the moving averages for the existing confirmed
// statistics and increases the confirmation ranges for mempool txs. This is
// meant to be called when a new block is mined, so that we discount older
// information.
func (stats *Estimator) updateMovingAverages(newHeight int32) {
	log.Debugf("Updated moving averages into block %d", newHeight)

	// Decay the existing stats so that, over time, we rely on more up-to-date
	// information regarding fees.
	for b := 0; b < len(stats.buckets); b++ {
		bucket := &stats.buckets[b]
		bucket.feeSum *= stats.decay
		bucket.confirmCount *= stats.decay
		for c := 0; c < len(bucket.confirmed); c++ {
			conf := &bucket.confirmed[c]
			conf.feeSum *= stats.decay
			conf.txCount *= stats.decay
		}
	}

	// For unconfirmed (mempool) transactions, every transaction will now take
	// at least one additional block to confirm. So for every fee bucket, we
	// move the stats up one confirmation range.
	for b := 0; b < len(stats.memPool); b++ {
		bucket := &stats.memPool[b]

		// The last confirmation range represents all txs confirmed at >= the
		// initial maxConfirms, so we *add* the second to last range into
		// the last range.
		c := len(bucket.confirmed) - 1
		bucket.confirmed[c].txCount += bucket.confirmed[c-1].txCount
		bucket.confirmed[c].feeSum += bucket.confirmed[c-1].feeSum

		// For the other ranges, just move up the stats.
		for c--; c > 0; c-- {
			bucket.confirmed[c] = bucket.confirmed[c-1]
		}

		// And finally, the very first confirmation range (i.e. what will enter
		// the mempool now that a new block has been mined) is zeroed so we can
		// start tracking brand new txs.
		bucket.confirmed[0].txCount = 0
		bucket.confirmed[0].feeSum = 0
	}

	stats.bestHeight = newHeight
}

// newMemPoolTx records a new memPool transaction into the stats. A brand new
// mempool transaction has a minimum confirmation range of 1, so it is inserted
// into the very first confirmation range bucket of the appropriate fee rate
// bucket.
func (stats *Estimator) newMemPoolTx(bucketIdx int32, fees feeRate) {
	conf := &stats.memPool[bucketIdx].confirmed[0]
	conf.feeSum += float64(fees)
	conf.txCount++
}

// newMinedTx moves a mined tx from the mempool into the confirmed statistics.
// Note that this should only be called if the transaction had been seen and
// previously tracked by calling newMemPoolTx for it. Failing to observe that
// will result in undefined statistical results.
func (stats *Estimator) newMinedTx(blocksToConfirm int32, rate feeRate) {
	bucketIdx := stats.lowerBucket(rate)
	confirmIdx := stats.confirmRange(blocksToConfirm)
	bucket := &stats.buckets[bucketIdx]

	// Increase the counts for all confirmation ranges starting at the first
	// confirmIdx because it took at least `blocksToConfirm` for this tx to be
	// mined. This is used to simplify the bucket selection during estimation,
	// so that we only need to check a single confirmation range (instead of
	// iterating to sum all confirmations with <= `minConfs`).
	for c := int(confirmIdx); c < len(bucket.confirmed); c++ {
		conf := &bucket.confirmed[c]
		conf.feeSum += float64(rate)
		conf.txCount++
	}
	bucket.confirmCount++
	bucket.feeSum += float64(rate)
}

func (stats *Estimator) removeFromMemPool(blocksInMemPool int32, rate feeRate) {
	bucketIdx := stats.lowerBucket(rate)
	confirmIdx := stats.confirmRange(blocksInMemPool + 1)
	bucket := &stats.memPool[bucketIdx]
	conf := &bucket.confirmed[confirmIdx]
	conf.feeSum -= float64(rate)
	conf.txCount--
	if conf.txCount < 0 {
		// If this happens, it means this function was called for a transaction
		// that was not previously registered with newMemPoolTx. This leaves the
		// fee db in an undefined state and should never happen in regular use.
		// If it does happen, then there is a logic or coding error somewhere,
		// either in the estimator itself or in its hooking to the
		// mempool/network sync manager. Either way, the easiest way to fix this
		// is to completely delete the database and start again. During
		// development, a panic() can be used here, and one may be reintroduced
		// once we are confident the estimator is completely bug free.
		log.Errorf("Transaction count in bucket index %d and confirmation "+
			"index %d became < 0", bucketIdx, confirmIdx)
	}
}

// estimateMedianFee estimates the median fee rate for the current recorded
// statistics such that at least successPct transactions have been mined on all
// tracked fee rate buckets with fee >= the median.
// In other words, this is the median fee of the lowest bucket such that it and
// all higher fee buckets have >= successPct transactions confirmed in at most
// `targetConfs` confirmations.
// Note that sometimes the requested combination of targetConfs and successPct is
// not achievable (hypothetical example: 99% of txs confirmed within 1 block)
// or there are not enough recorded statistics to derive a successful estimate
// (e.g. confirmation tracking has only started or there was a period of very
// few transactions). In those situations, the appropriate error is returned.
func (stats *Estimator) estimateMedianFee(targetConfs int32, successPct float64) (feeRate, error) {
	if targetConfs <= 0 {
		return 0, errors.New("target confirmation range cannot be <= 0")
	}

	const minTxCount float64 = 1

	if (targetConfs - 1) >= stats.maxConfirms {
		// We might want to add support to use a targetConf at +infinity to
		// allow us to make estimates at confirmation intervals higher than what
		// we currently track.
		return 0, ErrTargetConfTooLarge{MaxConfirms: stats.maxConfirms,
			ReqConfirms: targetConfs}
	}

	startIdx := len(stats.buckets) - 1
	confirmRangeIdx := stats.confirmRange(targetConfs)

	var totalTxs, confirmedTxs float64
	bestBucketsStt := startIdx
	bestBucketsEnd := startIdx
	curBucketsEnd := startIdx

	for b := startIdx; b >= 0; b-- {
		totalTxs += stats.buckets[b].confirmCount
		confirmedTxs += stats.buckets[b].confirmed[confirmRangeIdx].txCount

		// Add the mempool (unconfirmed) transactions to the total tx count
		// since a very large mempool for the given bucket might mean that
		// miners are reluctant to include these in their mined blocks.
		totalTxs += stats.memPool[b].confirmed[confirmRangeIdx].txCount

		if totalTxs > minTxCount {
			if confirmedTxs/totalTxs < successPct {
				if curBucketsEnd == startIdx {
					return 0, ErrNoSuccessPctBucketFound
				}
				break
			}

			bestBucketsStt = b
			bestBucketsEnd = curBucketsEnd
			curBucketsEnd = b - 1
			totalTxs = 0
			confirmedTxs = 0
		}
	}

	txCount := float64(0)
	for b := bestBucketsStt; b <= bestBucketsEnd; b++ {
		txCount += stats.buckets[b].confirmCount
	}
	if txCount <= 0 {
		return 0, ErrNotEnoughTxsForEstimate
	}
	txCount /= 2
	for b := bestBucketsStt; b <= bestBucketsEnd; b++ {
		if stats.buckets[b].confirmCount < txCount {
			txCount -= stats.buckets[b].confirmCount
		} else {
			median := stats.buckets[b].feeSum / stats.buckets[b].confirmCount
			return feeRate(median), nil
		}
	}

	return 0, errors.New("this isn't supposed to be reached")
}

// EstimateFee is the public version of estimateMedianFee. It calculates the
// suggested fee for a transaction to be confirmed in at most `targetConfs`
// blocks after publishing with a high degree of certainty.
//
// This function is safe to be called from multiple goroutines but might block
// until concurrent modifications to the internal database state are complete.
func (stats *Estimator) EstimateFee(targetConfs int32) (lbcutil.Amount, error) {
	stats.lock.RLock()
	rate, err := stats.estimateMedianFee(targetConfs, 0.95)
	stats.lock.RUnlock()

	if err != nil {
		return 0, err
	}

	rate = feeRate(math.Round(float64(rate)))
	if rate < stats.bucketFeeBounds[0] {
		// Prevent our public-facing API from ever returning something lower
		// than the minimum fee.
		rate = stats.bucketFeeBounds[0]
	}

	return lbcutil.Amount(rate), nil
}
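
// An illustrative usage sketch (added commentary, not part of the original
// file): a caller typically requests a rate for a target confirmation window
// and falls back to a default when the estimator lacks data. The fallback
// value below is an assumption for the example only.
//
//	rate, err := estimator.EstimateFee(6)
//	switch {
//	case errors.Is(err, ErrNotEnoughTxsForEstimate) ||
//		errors.Is(err, ErrNoSuccessPctBucketFound):
//		rate = lbcutil.Amount(1000) // hypothetical fallback fee rate
//	case err != nil:
//		// handle other errors (e.g. ErrTargetConfTooLarge)
//	}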

// Enable establishes the current best height of the blockchain after
// initializing the chain. All new mempool transactions will be added at this
// block height.
func (stats *Estimator) Enable(bestHeight int32) {
	log.Debugf("Setting best height as %d", bestHeight)
	stats.lock.Lock()
	stats.bestHeight = bestHeight
	stats.lock.Unlock()
}

// IsEnabled returns whether the fee estimator is ready to accept new mined and
// mempool transactions.
func (stats *Estimator) IsEnabled() bool {
	stats.lock.RLock()
	enabled := stats.bestHeight > -1
	stats.lock.RUnlock()
	return enabled
}

// AddMemPoolTransaction adds a mempool transaction to the estimator in order to
// account for it in the estimations. It assumes that this transaction is
// entering the mempool at the currently recorded best chain height, using the
// total fee amount (in atoms) and with the provided size (in bytes).
//
// This is safe to be called from multiple goroutines.
func (stats *Estimator) AddMemPoolTransaction(txHash *chainhash.Hash, fee, size int64) {
	stats.lock.Lock()
	defer stats.lock.Unlock()

	if stats.bestHeight < 0 {
		return
	}

	if _, exists := stats.memPoolTxs[*txHash]; exists {
		// We should not double count transactions.
		return
	}

	// Note that we use this less exact version instead of fee * 1000 / size
	// (using ints) because it naturally "downsamples" the fee rates towards the
	// minimum at values less than 0.001 DCR/KB. This is needed because, due to
	// how the wallet estimates the final fee given an input rate and the final
	// tx size, there's usually a small discrepancy towards a higher effective
	// rate in the published tx.
	rate := feeRate(fee / size * 1000)

	if rate < stats.bucketFeeBounds[0] {
		// Transactions paying less than the current relaying fee can only
		// possibly be included in the high priority/zero fee area of blocks,
		// which are usually of limited size, so we explicitly don't track
		// those.
		// This also naturally handles votes (SSGen transactions) which don't
		// carry a tx fee and are required for inclusion in blocks. Note that
		// the test is explicitly < instead of <= so that we *can* track
		// transactions that pay *exactly* the minimum fee.
		return
	}

	log.Debugf("Adding mempool tx %s using fee rate %.8f", txHash, rate/1e8)

	tx := memPoolTxDesc{
		addedHeight: stats.bestHeight,
		bucketIndex: stats.lowerBucket(rate),
		fees:        rate,
	}
	stats.memPoolTxs[*txHash] = tx
	stats.newMemPoolTx(tx.bucketIndex, rate)
}

// RemoveMemPoolTransaction removes a mempool transaction from statistics
// tracking.
//
// This is safe to be called from multiple goroutines.
func (stats *Estimator) RemoveMemPoolTransaction(txHash *chainhash.Hash) {
	stats.lock.Lock()
	defer stats.lock.Unlock()

	desc, exists := stats.memPoolTxs[*txHash]
	if !exists {
		return
	}

	log.Debugf("Removing tx %s from mempool", txHash)

	stats.removeFromMemPool(stats.bestHeight-desc.addedHeight, desc.fees)
	delete(stats.memPoolTxs, *txHash)
}

// processMinedTransaction moves a transaction that exists in the currently
// tracked mempool into a mined state.
//
// This function is *not* safe to be called from multiple goroutines.
func (stats *Estimator) processMinedTransaction(blockHeight int32, txh *chainhash.Hash) {
	desc, exists := stats.memPoolTxs[*txh]
	if !exists {
		// We cannot use transactions that we didn't know about to estimate
		// because that opens up the possibility of miners introducing dummy,
		// high fee transactions which would tend to then increase the average
		// fee estimate.
		// Tracking only previously known transactions forces miners trying to
		// pull off this attack to broadcast their transactions and possibly
		// forfeit their coins by having the transaction mined by a competitor.
		log.Tracef("Processing previously unknown mined tx %s", txh)
		return
	}

	stats.removeFromMemPool(blockHeight-desc.addedHeight, desc.fees)
	delete(stats.memPoolTxs, *txh)

	if blockHeight <= desc.addedHeight {
		// This shouldn't usually happen, but we need to explicitly test for it
		// because we can't account for non-positive confirmation ranges in
		// mined transactions.
		log.Errorf("Mined transaction %s (%d) that was known from "+
			"mempool at a higher block height (%d)", txh, blockHeight,
			desc.addedHeight)
		return
	}

	mineDelay := blockHeight - desc.addedHeight
	log.Debugf("Processing mined tx %s (rate %.8f, delay %d)", txh,
		desc.fees/1e8, mineDelay)
	stats.newMinedTx(mineDelay, desc.fees)
}

// ProcessBlock processes all mined transactions in the provided block.
//
// This function is safe to be called from multiple goroutines.
func (stats *Estimator) ProcessBlock(block *lbcutil.Block) error {
	stats.lock.Lock()
	defer stats.lock.Unlock()

	if stats.bestHeight < 0 {
		return nil
	}

	blockHeight := block.Height()
	if blockHeight <= stats.bestHeight {
		// We don't explicitly track reorgs right now.
		log.Warnf("Trying to process mined transactions at block %d when "+
			"previous best block was at height %d", blockHeight,
			stats.bestHeight)
		return nil
	}

	stats.updateMovingAverages(blockHeight)

	for _, tx := range block.Transactions() {
		stats.processMinedTransaction(blockHeight, tx.Hash())
	}

	if stats.db != nil {
		return stats.updateDatabase()
	}

	return nil
}

// Close closes the database (if it is currently opened).
func (stats *Estimator) Close() {
	stats.lock.Lock()

	if stats.db != nil {
		log.Trace("Closing fee estimator database")
		stats.db.Close()
		stats.db = nil
	}

	stats.lock.Unlock()
}
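
// A rough integration sketch (added commentary, not part of the original file)
// of the call flow this API expects; the surrounding chain, mempool, and block
// variables are assumptions for illustration only:
//
//	estimator.Enable(chain.BestHeight())                      // once the chain is ready
//	estimator.AddMemPoolTransaction(tx.Hash(), txFee, txSize) // on mempool acceptance
//	estimator.RemoveMemPoolTransaction(tx.Hash())             // on eviction without mining
//	if err := estimator.ProcessBlock(block); err != nil {     // on every connected block
//		// log or handle the database update error
//	}
//	estimator.Close() // on shutdown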