package hostdb

import (
	"fmt"
	"math"
	"strings"
	"time"

	"gitlab.com/NebulousLabs/errors"

	"gitlab.com/SiaPrime/SiaPrime/build"
	"gitlab.com/SiaPrime/SiaPrime/modules"
	"gitlab.com/SiaPrime/SiaPrime/modules/renter/hostdb/hosttree"
	"gitlab.com/SiaPrime/SiaPrime/types"
)

const (
	// collateralExponentiationLarge is the power to which we raise the weight
	// during collateral adjustment when the collateral is large. This sublinear
	// number ensures that there is not an overpreference on collateral when
	// collateral is large relative to the size of the allowance.
	collateralExponentiationLarge = 0.5

	// collateralExponentiationSmall is the power to which we raise the weight
	// during collateral adjustment when the collateral is small. A large number
	// ensures a heavy focus on collateral when distinguishing between hosts
	// that have a very small amount of collateral provided compared to the size
	// of the allowance.
	//
	// For safety, this number needs to be larger than priceExponentiationSmall.
	collateralExponentiationSmall = 4

	// collateralFloor is a part of the equation for determining the collateral
	// cutoff between large and small collateral. The equation figures out how
	// much collateral is expected given the allowance, and then divides by
	// 'collateralFloor' so that the cutoff for how much collateral counts as
	// 'not much' is reasonably below what we are actually expecting from the
	// host.
	//
	// collateralFloor determines how much lower than the expected collateral
	// the host can provide before switching to a different scoring strategy. A
	// collateral floor of 0.5, for example, would mean that once the host is
	// offering a collateral that is more than 50% of what the renter would
	// expect given the amount of storage being used, the host switches to a
	// scoring strategy which less intensely favors adding more collateral. As
	// long as the host has provided sufficient skin-in-the-game, enormous
	// amounts of extra collateral are less important.
	//
	// The collateralFloor is set relative to the price floor because generally
	// we look for the collateral to be about 2x the price. (With priceFloor at
	// 0.1, the actual collateral floor is 0.2.)
	collateralFloor = priceFloor * 2

	// interactionExponentiation determines how heavily we penalize hosts for
	// having poor interactions - disconnecting, RPCs with errors, etc. The
	// exponentiation is very high because the renter will already intentionally
	// avoid hosts that do not have many successful interactions, meaning that
	// the bad points do not rack up very quickly.
	interactionExponentiation = 10

	// priceExponentiationLarge is the number of times that the weight is
	// divided by the price when the price is large relative to the allowance.
	// The exponentiation is a lot higher because we care greatly about high
	// priced hosts.
	priceExponentiationLarge = 5

	// priceExponentiationSmall is the number of times that the weight is
	// divided by the price when the price is small relative to the allowance.
	// The exponentiation is lower because we do not care about saving
	// substantial amounts of money when the price is low.
	priceExponentiationSmall = 0.75

	// priceFloor determines how much cheaper than the expected allowance the
	// host can be before switching to a different scoring strategy for the
	// score. A price floor of 0.2, for example, would mean that once the host
	// is less than 20% of the expected price for that amount of resources
	// (using the allowance as a guide), instead of using
	// priceExponentiationLarge to reward decreasing prices, we use
	// priceExponentiationSmall to reward decreasing prices. This reduced
	// steepness reflects the reality that getting 99.9% off is not all that
	// different from getting 80% off - both feel like an amazing deal.
	//
	// This is necessary to prevent exploits where a host gets an unreasonable
	// score by putting its price way too low.
	priceFloor = 0.1
)

var (
	// requiredStorage indicates the amount of storage that the host must be
	// offering in order to be considered a valuable/worthwhile host.
	requiredStorage = build.Select(build.Var{
		Standard: uint64(20e9),
		Dev:      uint64(1e6),
		Testing:  uint64(1e3),
	}).(uint64)
)

// collateralAdjustments improves the host's weight according to the amount of
// collateral that they have provided.
func (hdb *HostDB) collateralAdjustments(entry modules.HostDBEntry, allowance modules.Allowance) float64 {
	// Ensure that all values will avoid divide by zero errors. The allowance
	// is a value copy, so mutating it here does not affect the caller.
	if allowance.Hosts == 0 {
		allowance.Hosts = 1
	}
	if allowance.Period == 0 {
		allowance.Period = 1
	}
	if allowance.ExpectedStorage == 0 {
		allowance.ExpectedStorage = 1
	}
	if allowance.ExpectedUpload == 0 {
		allowance.ExpectedUpload = 1
	}
	if allowance.ExpectedDownload == 0 {
		allowance.ExpectedDownload = 1
	}
	if allowance.ExpectedRedundancy == 0 {
		allowance.ExpectedRedundancy = 1
	}

	// Convert each element of the allowance into a number of resources that we
	// expect to use in this contract.
	contractExpectedFunds := allowance.Funds.Div64(allowance.Hosts)
	contractExpectedStorage := uint64(float64(allowance.ExpectedStorage) * allowance.ExpectedRedundancy / float64(allowance.Hosts))
	contractExpectedStorageTime := types.NewCurrency64(contractExpectedStorage).Mul64(uint64(allowance.Period))

	// Ensure that the allowance and expected storage will not brush up against
	// the max collateral. If the allowance comes within half of the max
	// collateral, cap the collateral that we use during adjustments based on
	// the max collateral instead of the per-byte collateral.
	//
	// The purpose of this code is to make sure that the host actually has a
	// high enough MaxCollateral to cover all of the data that we intend to
	// store with the host at the collateral price that the host is advertising.
	// We add a 2x buffer to account for the fact that the renter may end up
	// storing extra data on this host.
	hostCollateral := entry.Collateral.Mul(contractExpectedStorageTime)
	possibleCollateral := entry.MaxCollateral.Div64(2)
	if possibleCollateral.Cmp(hostCollateral) < 0 {
		hostCollateral = possibleCollateral
	}

	// Determine the cutoff for the difference between small collateral and
	// large collateral. The cutoff is used to create a step function in the
	// collateral scoring where decreasing collateral results in much higher
	// penalties below a certain threshold.
	//
	// This threshold is attempting to be the threshold where the amount of
	// money becomes insignificant. A collateral that is 10x higher than the
	// price is not interesting, compelling, nor a sign of reliability if the
	// price and collateral are both effectively zero.
	//
	// TODO: This method has no way to account for bandwidth heavy vs. storage
	// heavy hosts, nor did we give the user any way to configure a situation
	// where hosts aren't needed to be nearly as reliable.
	cutoff := contractExpectedFunds.MulFloat(collateralFloor)

	// Get the ratio between the cutoff and the actual collateral so we can
	// award the bonus for having a large collateral.
	collateral64, _ := hostCollateral.Float64()
	cutoff64, _ := cutoff.Float64()
	// If the hostCollateral is less than the cutoff, set the cutoff equal to
	// the collateral so that the ratio has a minimum of 1, and also so that
	// the smallWeight is computed based on the actual collateral instead of
	// just the cutoff.
	if collateral64 < cutoff64 {
		cutoff64 = collateral64
	}
	// One last check for safety before grabbing the ratio. This ensures that
	// the ratio is never less than one, which is critical to getting a coherent
	// large weight - large weight should never be below one.
	if collateral64 < 1 {
		collateral64 = 1
	}
	if cutoff64 < 1 {
		cutoff64 = 1
	}
	ratio := collateral64 / cutoff64

	// Use the cutoff to determine the score based on the small exponentiation
	// factor (which has a high exponentiation), and then use the ratio between
	// the two to determine the bonus gained from having a high collateral.
	smallWeight := math.Pow(cutoff64, collateralExponentiationSmall)
	largeWeight := math.Pow(ratio, collateralExponentiationLarge)
	return smallWeight * largeWeight
}

// durationAdjustments checks that the host has a max duration which is larger
// than the period of the allowance (plus the renew window). The host's score
// is heavily minimized if not.
func (hdb *HostDB) durationAdjustments(entry modules.HostDBEntry, allowance modules.Allowance) float64 {
	if entry.MaxDuration < allowance.Period+allowance.RenewWindow {
		return math.SmallestNonzeroFloat64
	}
	return 1
}

// interactionAdjustments determine the penalty to be applied to a host for the
// historic and current interactions with that host. This function focuses on
// historic interactions and ignores recent interactions.
func (hdb *HostDB) interactionAdjustments(entry modules.HostDBEntry) float64 {
	// Give the host a baseline of 30 successful interactions and 1 failed
	// interaction. This gives the host a baseline if we've had few
	// interactions with them. The 1 failed interaction will become
	// irrelevant after sufficient interactions with the host.
	hsi := entry.HistoricSuccessfulInteractions + 30
	hfi := entry.HistoricFailedInteractions + 1

	// Determine the interaction ratio based off of the historic interactions.
	ratio := float64(hsi) / float64(hsi+hfi)
	return math.Pow(ratio, interactionExponentiation)
}

// priceAdjustments will adjust the weight of the entry according to the prices
// that it has set.
//
// REMINDER: The allowance contains an absolute number of bytes for expected
// storage on a per-renter basis that doesn't account for redundancy. This value
// needs to be adjusted to a per-contract basis that accounts for redundancy.
// The upload and download values also do not account for redundancy, and they
// are on a per-block basis, meaning you need to multiply by the allowance
// period when working with these values.
func (hdb *HostDB) priceAdjustments(entry modules.HostDBEntry, allowance modules.Allowance, txnFees types.Currency) float64 {
	// Divide by zero mitigation.
	if allowance.Hosts == 0 {
		allowance.Hosts = 1
	}
	if allowance.Period == 0 {
		allowance.Period = 1
	}
	if allowance.ExpectedStorage == 0 {
		allowance.ExpectedStorage = 1
	}
	if allowance.ExpectedUpload == 0 {
		allowance.ExpectedUpload = 1
	}
	if allowance.ExpectedDownload == 0 {
		allowance.ExpectedDownload = 1
	}
	if allowance.ExpectedRedundancy == 0 {
		allowance.ExpectedRedundancy = 1
	}

	// Convert each element of the allowance into a number of resources that we
	// expect to use in this contract.
	contractExpectedDownload := types.NewCurrency64(allowance.ExpectedDownload).Mul64(uint64(allowance.Period)).Div64(allowance.Hosts)
	contractExpectedFunds := allowance.Funds.Div64(allowance.Hosts)
	contractExpectedStorage := uint64(float64(allowance.ExpectedStorage) * allowance.ExpectedRedundancy / float64(allowance.Hosts))
	contractExpectedStorageTime := types.NewCurrency64(contractExpectedStorage).Mul64(uint64(allowance.Period))
	contractExpectedUpload := types.NewCurrency64(allowance.ExpectedUpload).Mul64(uint64(allowance.Period)).MulFloat(allowance.ExpectedRedundancy).Div64(allowance.Hosts)

	// Calculate the hostCollateral the renter would expect the host to put
	// into a contract.
	contractTxnFees := txnFees.Mul64(modules.EstimatedFileContractTransactionSetSize)
	_, _, hostCollateral, err := modules.RenterPayoutsPreTax(entry, contractExpectedFunds, contractTxnFees, types.ZeroCurrency, types.ZeroCurrency, allowance.Period, contractExpectedStorage)
	if err != nil {
		// Errors containing 'exceeds funding' are not logged. All it means is
		// that the contract price (or some other price) of the host is too high
		// for us to be able to form a contract with it, so this host is
		// strictly not valuable given our allowance and its pricing. This is
		// common enough and expected enough that we don't need to log when it
		// happens.
		if !strings.Contains(err.Error(), "exceeds funding") {
			info := fmt.Sprintf("Error while estimating collateral for host: Host %v, ContractPrice %v, TxnFees %v, Funds %v", entry.PublicKey.String(), entry.ContractPrice.HumanString(), txnFees.HumanString(), allowance.Funds.HumanString())
			hdb.log.Debugln(errors.AddContext(err, info))
		}
		return math.SmallestNonzeroFloat64
	}

	// Determine the pricing for each type of resource in the contract. We have
	// already converted the resources into absolute terms for this contract.
	//
	// The contract price and transaction fees get doubled because we expect
	// that there will be on average one early renewal per contract, due to
	// spending all of the contract's money.
	contractPrice := entry.ContractPrice.Add(txnFees).Mul64(2)
	downloadPrice := entry.DownloadBandwidthPrice.Mul(contractExpectedDownload)
	storagePrice := entry.StoragePrice.Mul(contractExpectedStorageTime)
	uploadPrice := entry.UploadBandwidthPrice.Mul(contractExpectedUpload)
	// The siafund fee is levied on the whole payout, including the host's
	// collateral, so the collateral is included here but not in totalPrice.
	siafundFee := contractPrice.Add(hostCollateral).Add(downloadPrice).Add(storagePrice).Add(uploadPrice).MulTax()
	totalPrice := contractPrice.Add(downloadPrice).Add(storagePrice).Add(uploadPrice).Add(siafundFee)

	// Determine a cutoff for whether the total price is considered a high price
	// or a low price. This cutoff attempts to determine where the price becomes
	// insignificant.
	cutoff := contractExpectedFunds.MulFloat(priceFloor)

	// Convert the price and cutoff to floats.
	price64, _ := totalPrice.Float64()
	cutoff64, _ := cutoff.Float64()
	// If the total price is less than the cutoff, set the cutoff equal to the
	// price. This ensures that the ratio (totalPrice / cutoff) can never be
	// less than 1.
	if price64 < cutoff64 {
		cutoff64 = price64
	}

	// Check for less-than-one.
	if price64 < 1 {
		price64 = 1
	}
	if cutoff64 < 1 {
		cutoff64 = 1
	}
	// Perform this check one more time after all of the conversions, just in
	// case there was some sort of rounding error.
	if price64 < cutoff64 {
		cutoff64 = price64
	}
	ratio := price64 / cutoff64

	// The price penalty mirrors the collateral bonus: a steep penalty while
	// the price is above the cutoff, a gentle one below it. The final score
	// contribution is the reciprocal, since a higher price should mean a
	// lower weight.
	smallWeight := math.Pow(cutoff64, priceExponentiationSmall)
	largeWeight := math.Pow(ratio, priceExponentiationLarge)

	return 1 / (smallWeight * largeWeight)
}

// storageRemainingAdjustments adjusts the weight of the entry according to how
// much storage it has remaining.
func (hdb *HostDB) storageRemainingAdjustments(entry modules.HostDBEntry, allowance modules.Allowance) float64 {
	// Determine how much data the renter is storing on this host.
	var storedData float64
	if ci, exists := hdb.knownContracts[entry.PublicKey.String()]; exists {
		storedData = float64(ci.StoredData)
	}

	// idealDataPerHost is the amount of data that we would have to put on each
	// host assuming that our storage requirements were spread evenly across
	// every single host.
	idealDataPerHost := float64(allowance.ExpectedStorage) * allowance.ExpectedRedundancy / float64(allowance.Hosts)
	// allocationPerHost is the amount of data that we would like to be able to
	// put on each host, because data is not always spread evenly across the
	// hosts during upload. Slower hosts may get very little data, more
	// expensive hosts may get very little data, and other factors can skew the
	// distribution. allocationPerHost takes into account the skew and tries to
	// ensure that there's enough allocation per host to accommodate for a skew.
	allocationPerHost := idealDataPerHost * storageSkewMultiplier
	// hostExpectedStorage is the amount of storage that we expect to be able to
	// store on this host overall, which should include the stored data that is
	// already on the host.
	hostExpectedStorage := (float64(entry.RemainingStorage) * storageCompetitionFactor) + storedData
	// If we expect to be able to store more data on the host than we need to
	// allocate, the host gets full score for storage.
	if hostExpectedStorage >= allocationPerHost {
		return 1
	}
	// Otherwise, the score of the host is the fraction of the data we expect
	// raised to the storage penalty exponentiation. (An older comment called
	// this "the square" — presumably the exponent used to be 2; the constant
	// storagePenaltyExponentitaion now governs the actual penalty.)
	storageRatio := hostExpectedStorage / allocationPerHost
	return math.Pow(storageRatio, storagePenaltyExponentitaion)
}

// versionAdjustments will adjust the weight of the entry according to the siad
// version reported by the host.
//
// TODO: move the hardcoded version strings to the build package or somewhere
// else so this does not have to be altered on every new version.
func versionAdjustments(entry modules.HostDBEntry) float64 {
	base := float64(1)
	if build.VersionCmp(entry.Version, "1.4.2") < 0 {
		base = base * 0.99999 // Safety value to make sure we update the version penalties every time we update the host.
	}
	// Light penalty for hosts below v1.4.1.
	if build.VersionCmp(entry.Version, "1.4.1") < 0 {
		base = base * 0.7
	}
	// Heavy penalty for hosts below v1.4.0.
	if build.VersionCmp(entry.Version, "1.4.0") < 0 {
		base = math.SmallestNonzeroFloat64
	}
	return base
}

// lifetimeAdjustments will adjust the weight of the host according to the total
// amount of time that has passed since the host's original announcement.
func (hdb *HostDB) lifetimeAdjustments(entry modules.HostDBEntry) float64 {
	base := float64(1)
	// Penalties stack: a brand-new host (< 144 blocks old) falls into every
	// bracket below, for a combined 972x reduction. The inline comments track
	// the cumulative penalty.
	if hdb.blockHeight >= entry.FirstSeen {
		age := hdb.blockHeight - entry.FirstSeen
		if age < 12000 {
			base = base * 2 / 3 // 1.5x total
		}
		if age < 6000 {
			base = base / 2 // 3x total
		}
		if age < 4000 {
			base = base / 2 // 6x total
		}
		if age < 2000 {
			base = base / 2 // 12x total
		}
		if age < 1000 {
			base = base / 3 // 36x total
		}
		if age < 576 {
			base = base / 3 // 108x total
		}
		if age < 288 {
			base = base / 3 // 324x total
		}
		if age < 144 {
			base = base / 3 // 972x total
		}
	}
	return base
}

// uptimeAdjustments penalizes the host for having poor uptime, and for being
// offline.
//
// CAUTION: The function 'updateEntry' will manually fill out two scans for a
// new host to give the host some initial uptime or downtime. Modification of
// this function needs to be made paying attention to the structure of that
// function.
//
// TODO: This function doesn't correctly handle situations where the user's
// clock goes back in time. If the user adjusts their system clock to be in the
// past, we'll get timestamping that's out of order, and this will cause erratic
// / improper / untested behavior.
func (hdb *HostDB) uptimeAdjustments(entry modules.HostDBEntry) float64 {
	// Special case: if we have scanned the host twice or fewer, don't perform
	// uptime math.
	if len(entry.ScanHistory) == 0 {
		return 0.25
	}
	if len(entry.ScanHistory) == 1 {
		if entry.ScanHistory[0].Success {
			return 0.75
		}
		return 0.25
	}
	if len(entry.ScanHistory) == 2 {
		if entry.ScanHistory[0].Success && entry.ScanHistory[1].Success {
			return 0.85
		}
		if entry.ScanHistory[0].Success || entry.ScanHistory[1].Success {
			return 0.50
		}
		return 0.05
	}

	// Compute the total measured uptime and total measured downtime for this
	// host. Each interval between consecutive scans is attributed to whichever
	// state (up or down) the earlier scan observed.
	downtime := entry.HistoricDowntime
	uptime := entry.HistoricUptime
	recentTime := entry.ScanHistory[0].Timestamp
	recentSuccess := entry.ScanHistory[0].Success
	for _, scan := range entry.ScanHistory[1:] {
		if recentTime.After(scan.Timestamp) {
			if build.DEBUG {
				hdb.log.Critical("Host entry scan history not sorted.")
			} else {
				hdb.log.Print("WARN: Host entry scan history not sorted.")
			}
			// Ignore the unsorted scan entry.
			continue
		}
		if recentSuccess {
			uptime += scan.Timestamp.Sub(recentTime)
		} else {
			downtime += scan.Timestamp.Sub(recentTime)
		}
		recentTime = scan.Timestamp
		recentSuccess = scan.Success
	}

	// One more check to incorporate the uptime or downtime of the most recent
	// scan, we assume that if we scanned them right now, their uptime /
	// downtime status would be equal to what it currently is.
	if recentSuccess {
		uptime += time.Now().Sub(recentTime)
	} else {
		downtime += time.Now().Sub(recentTime)
	}

	// Sanity check against 0 total time.
	if uptime == 0 && downtime == 0 {
		build.Critical("uptime and downtime are zero for this host, should have been caught in earlier logic")
		return math.SmallestNonzeroFloat64
	}

	// Compute the uptime ratio, but shift by 0.02 to acknowledge fully that
	// 98% uptime and 100% uptime is valued the same.
	uptimeRatio := float64(uptime) / float64(uptime+downtime)
	if uptimeRatio > 0.98 {
		uptimeRatio = 0.98
	}
	uptimeRatio += 0.02

	// Cap the total amount of downtime allowed based on the total number of
	// scans that have happened, so a host with few scans is not annihilated by
	// a couple of early failures.
	allowedDowntime := 0.03 * float64(len(entry.ScanHistory))
	if uptimeRatio < 1-allowedDowntime {
		uptimeRatio = 1 - allowedDowntime
	}

	// Calculate the penalty for low uptime. Penalties increase extremely
	// quickly as uptime falls away from 95%.
	//
	// 100% uptime = 1
	// 98%  uptime = 1
	// 95%  uptime = 0.83
	// 90%  uptime = 0.26
	// 85%  uptime = 0.03
	// 80%  uptime = 0.001
	// 75%  uptime = 0.00001
	// 70%  uptime = 0.0000001
	exp := 200 * math.Min(1-uptimeRatio, 0.30)
	return math.Pow(uptimeRatio, exp)
}

// managedCalculateHostWeightFn creates a hosttree.WeightFunc given an Allowance.
func (hdb *HostDB) managedCalculateHostWeightFn(allowance modules.Allowance) hosttree.WeightFunc {
	// Get the txnFees. They are captured by the returned closure, so the
	// closure uses a snapshot of the fees from the time this function was
	// called.
	hdb.mu.RLock()
	txnFees := hdb.txnFees
	hdb.mu.RUnlock()
	// Create the weight function.
	return func(entry modules.HostDBEntry) hosttree.ScoreBreakdown {
		return hosttree.HostAdjustments{
			BurnAdjustment:             1,
			CollateralAdjustment:       hdb.collateralAdjustments(entry, allowance),
			DurationAdjustment:         hdb.durationAdjustments(entry, allowance),
			InteractionAdjustment:      hdb.interactionAdjustments(entry),
			AgeAdjustment:              hdb.lifetimeAdjustments(entry),
			PriceAdjustment:            hdb.priceAdjustments(entry, allowance, txnFees),
			StorageRemainingAdjustment: hdb.storageRemainingAdjustments(entry, allowance),
			UptimeAdjustment:           hdb.uptimeAdjustments(entry),
			VersionAdjustment:          versionAdjustments(entry),
		}
	}
}

// EstimateHostScore takes a HostExternalSettings and returns the estimated
// score of that host in the hostdb, assuming no penalties for age or uptime.
func (hdb *HostDB) EstimateHostScore(entry modules.HostDBEntry, allowance modules.Allowance) (modules.HostScoreBreakdown, error) {
	if err := hdb.tg.Add(); err != nil {
		return modules.HostScoreBreakdown{}, err
	}
	defer hdb.tg.Done()
	return hdb.managedEstimatedScoreBreakdown(entry, allowance, true, true, true), nil
}

// ScoreBreakdown provides a detailed set of scalars and bools indicating
// elements of the host's overall score.
func (hdb *HostDB) ScoreBreakdown(entry modules.HostDBEntry) (modules.HostScoreBreakdown, error) {
	if err := hdb.tg.Add(); err != nil {
		return modules.HostScoreBreakdown{}, err
	}
	defer hdb.tg.Done()
	return hdb.managedScoreBreakdown(entry, false, false, false), nil
}

// managedEstimatedScoreBreakdown computes the score breakdown of a host.
// Certain adjustments can be ignored.
func (hdb *HostDB) managedEstimatedScoreBreakdown(entry modules.HostDBEntry, allowance modules.Allowance, ignoreAge, ignoreDuration, ignoreUptime bool) modules.HostScoreBreakdown {
	hosts := hdb.ActiveHosts()
	weightFunc := hdb.managedCalculateHostWeightFn(allowance)

	// Compute the totalScore.
	//
	// NOTE(review): the totalScore sums scores using hdb.weightFunc (the
	// hostdb's current weight function), while the entry's breakdown below
	// uses the fresh weightFunc built from the provided allowance — confirm
	// this mixing is intentional (it compares the candidate's estimated score
	// against the existing host set's current scores).
	hdb.mu.Lock()
	defer hdb.mu.Unlock()
	totalScore := types.Currency{}
	for _, host := range hosts {
		totalScore = totalScore.Add(hdb.weightFunc(host).Score())
	}
	// Compute the breakdown.
	return weightFunc(entry).HostScoreBreakdown(totalScore, ignoreAge, ignoreDuration, ignoreUptime)
}

// managedScoreBreakdown computes the score breakdown of a host. Certain
// adjustments can be ignored.
func (hdb *HostDB) managedScoreBreakdown(entry modules.HostDBEntry, ignoreAge, ignoreDuration, ignoreUptime bool) modules.HostScoreBreakdown {
	hosts := hdb.ActiveHosts()

	// Compute the totalScore.
	hdb.mu.Lock()
	defer hdb.mu.Unlock()
	totalScore := types.Currency{}
	for _, host := range hosts {
		totalScore = totalScore.Add(hdb.weightFunc(host).Score())
	}
	// Compute the breakdown relative to the total score of all active hosts.
	return hdb.weightFunc(entry).HostScoreBreakdown(totalScore, ignoreAge, ignoreDuration, ignoreUptime)
}