gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/hostdb/scan.go

     1  package hostdb
     2  
     3  // scan.go contains the functions which periodically scan the list of all hosts
     4  // to see which hosts are online or offline, and to get any updates to the
     5  // settings of the hosts.
     6  
     7  import (
     8  	"encoding/json"
     9  	"fmt"
    10  	"net"
    11  	"sort"
    12  	"time"
    13  
    14  	"gitlab.com/NebulousLabs/errors"
    15  	"gitlab.com/NebulousLabs/fastrand"
    16  	"gitlab.com/NebulousLabs/siamux"
    17  	"gitlab.com/NebulousLabs/siamux/mux"
    18  
    19  	"gitlab.com/SkynetLabs/skyd/build"
    20  	"gitlab.com/SkynetLabs/skyd/skymodules"
    21  	"gitlab.com/SkynetLabs/skyd/skymodules/renter/hostdb/hosttree"
    22  	"go.sia.tech/siad/modules"
    23  	"go.sia.tech/siad/types"
    24  )
    25  
    26  var (
    27  	// scanTimeElapsedRequirement defines the amount of time that must elapse
    28  	// between scans in order for a new scan to be accepted into the hostdb as
    29  	// part of the scan history.
    30  	scanTimeElapsedRequirement = build.Select(build.Var{
    31  		Standard: 60 * time.Minute,
    32  		Dev:      2 * time.Minute,
    33  		Testing:  500 * time.Millisecond,
    34  	}).(time.Duration)
    35  )
    36  
    37  // equalIPNets checks if two slices of IP subnets contain the same subnets.
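        // NOTE: the comparison treats both slices as sets, so slices that contain
        // duplicate subnets can compare as equal even when they differ.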
    38  func equalIPNets(ipNetsA, ipNetsB []string) bool {
    39  	// Check the length first.
    40  	if len(ipNetsA) != len(ipNetsB) {
    41  		return false
    42  	}
    43  	// Create a map of all the subnets in ipNetsA.
    44  	mapNetsA := make(map[string]struct{})
    45  	for _, subnet := range ipNetsA {
    46  		mapNetsA[subnet] = struct{}{}
    47  	}
    48  	// Make sure that all the subnets from ipNetsB are in the map.
    49  	for _, subnet := range ipNetsB {
    50  		if _, exists := mapNetsA[subnet]; !exists {
    51  			return false
    52  		}
    53  	}
    54  	return true
    55  }
    56  
    57  // feeChangeSignificant determines if the difference between two transaction
    58  // fees is significant enough to warrant rebuilding the hosttree.
    59  func feeChangeSignificant(oldTxnFees, newTxnFees types.Currency) bool {
    60  	maxChange := oldTxnFees.MulFloat(txnFeesUpdateRatio)
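        	// The change counts as significant when the new fee lands on or outside
        	// the band [oldTxnFees-maxChange, oldTxnFees+maxChange].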
    61  	return newTxnFees.Cmp(oldTxnFees.Sub(maxChange)) <= 0 || newTxnFees.Cmp(oldTxnFees.Add(maxChange)) >= 0
    62  }
    63  
    64  // managedUpdateTxnFees checks if the txnFees have changed significantly since
    65  // the last time they were updated and updates them if necessary.
    66  func (hdb *HostDB) managedUpdateTxnFees() {
    67  	// Get the old txnFees from the hostdb.
    68  	hdb.mu.RLock()
    69  	allowance := hdb.allowance
    70  	oldTxnFees := hdb.txnFees
    71  	hdb.mu.RUnlock()
    72  
    73  	// Get the new fees from the tpool.
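        	// FeeEstimation returns a minimum and a maximum estimate; the hostdb
        	// tracks the maximum.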
    74  	_, newTxnFees := hdb.staticTpool.FeeEstimation()
    75  
    76  	// If the change is not significant we are done.
    77  	if !feeChangeSignificant(oldTxnFees, newTxnFees) {
    78  		hdb.staticLog.Debugf("No need to update txnFees oldFees %v newFees %v",
    79  			oldTxnFees.HumanString(), newTxnFees.HumanString())
    80  		return
    81  	}
    82  	// Update the txnFees.
    83  	hdb.mu.Lock()
    84  	hdb.txnFees = newTxnFees
    85  	hdb.mu.Unlock()
    86  	// Recompute the host weight function.
    87  	hwf := hdb.managedCalculateHostWeightFn(allowance)
    88  	// Set the weight function.
    89  	if err := hdb.managedSetWeightFunction(hwf); err != nil {
    90  		build.Critical("Failed to set the new weight function", err)
    91  	}
    92  	hdb.staticLog.Println("Updated the hostdb txnFees to", newTxnFees.HumanString())
    93  }
    94  
    95  // queueScan will add a host to the queue to be scanned. The host will be added
    96  // at a random position which means that the order in which queueScan is called
    97  // is not necessarily the order in which the hosts get scanned. This randomizes
    98  // the scan order during the initial scan.
    99  func (hdb *HostDB) queueScan(entry skymodules.HostDBEntry) {
   100  	// If this entry is already in the scan pool, we can return immediately.
   101  	_, exists := hdb.scanMap[entry.PublicKey.String()]
   102  	if exists {
   103  		return
   104  	}
   105  	// Add the entry to a random position in the waitlist.
   106  	hdb.scanMap[entry.PublicKey.String()] = struct{}{}
   107  	hdb.scanList = append(hdb.scanList, entry)
   108  	if len(hdb.scanList) > 1 {
   109  		i := len(hdb.scanList) - 1
   110  		j := fastrand.Intn(i)
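        		// fastrand.Intn(i) returns a value in [0, i), so the newly appended
        		// entry always swaps with one of the existing entries.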
   111  		hdb.scanList[i], hdb.scanList[j] = hdb.scanList[j], hdb.scanList[i]
   112  	}
   113  	// Check if any thread is currently emptying the waitlist. If not, spawn a
   114  	// thread to empty the waitlist.
   115  	if hdb.scanWait {
   116  		// Another thread is emptying the scan list, nothing to worry about.
   117  		return
   118  	}
   119  
   120  	// Sanity check - the scan map and the scan list should be roughly the same
   121  	// length; allow up to maxScanningThreads entries of slack before complaining.
   122  	if build.DEBUG && len(hdb.scanMap) > len(hdb.scanList)+maxScanningThreads {
   123  		hdb.staticLog.Critical("The hostdb scan map has seemingly grown too large:", len(hdb.scanMap), len(hdb.scanList), maxScanningThreads)
   124  	}
   125  
   126  	// Nobody is emptying the scan list, create and run a scan thread.
   127  	hdb.scanWait = true
   128  	go func() {
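        		// The scan pool is unbuffered, so sends on it only complete once a
        		// threadedProbeHosts worker is ready to receive the entry.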
   129  		scanPool := make(chan skymodules.HostDBEntry)
   130  		defer close(scanPool)
   131  
   132  		if hdb.tg.Add() != nil {
   133  			// Hostdb is shutting down, don't spin up another thread.  It is
   134  			// okay to leave scanWait set to true as that will not affect
   135  			// shutdown.
   136  			return
   137  		}
   138  		defer hdb.tg.Done()
   139  
   140  		// Block scan when a specific dependency is provided.
   141  		hdb.staticDeps.Disrupt("BlockScan")
   142  
   143  		// Due to the patterns used to spin up scanning threads, it's possible
   144  		// that we get to this point while all scanning threads are currently
   145  		// used up, completing jobs that were sent out by the previous pool
   146  		// managing thread. This thread is at risk of deadlocking if there's
   147  		// not at least one scanning thread accepting work that it created
   148  		// itself, so we use a starterThread exception and spin up
   149  		// one-thread-too-many on the first iteration to ensure that we do not
   150  		// deadlock.
   151  		starterThread := false
   152  		for {
   153  			// If the scanList is empty, this thread can spin down.
   154  			hdb.mu.Lock()
   155  			if len(hdb.scanList) == 0 {
   156  				// Scan list is empty, can exit. Let the world know that nobody
   157  				// is emptying the scan list anymore.
   158  				hdb.scanWait = false
   159  				hdb.mu.Unlock()
   160  				return
   161  			}
   162  
   163  			// Get the next host, shrink the scan list.
   164  			entry := hdb.scanList[0]
   165  			hdb.scanList = hdb.scanList[1:]
   166  			delete(hdb.scanMap, entry.PublicKey.String())
   167  			scansRemaining := len(hdb.scanList)
   168  
   169  			// Grab the most recent entry for this host.
   170  			recentEntry, exists := hdb.staticHostTree.Select(entry.PublicKey)
   171  			if exists {
   172  				entry = recentEntry
   173  			}
   174  
   175  			// Try to send this entry to an existing idle worker (non-blocking).
   176  			select {
   177  			case scanPool <- entry:
   178  				hdb.staticLog.Debugf("Sending host %v for scan, %v hosts remain", entry.PublicKey.String(), scansRemaining)
   179  				hdb.mu.Unlock()
   180  				continue
   181  			default:
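        				// No idle worker could accept the entry right now; fall through
        				// and consider spawning an additional scanning thread below.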
   182  			}
   183  
   184  			// Create new worker thread.
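        			// Spawn a worker if we are below maxScanningThreads, or spawn the
        			// single extra 'starter' worker described above even if the cap
        			// has been reached.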
   185  			if hdb.scanningThreads < maxScanningThreads || !starterThread {
   186  				starterThread = true
   187  				hdb.scanningThreads++
   188  				if err := hdb.tg.Add(); err != nil {
   189  					hdb.mu.Unlock()
   190  					return
   191  				}
   192  				go func() {
   193  					defer hdb.tg.Done()
   194  					hdb.threadedProbeHosts(scanPool)
   195  					hdb.mu.Lock()
   196  					hdb.scanningThreads--
   197  					hdb.mu.Unlock()
   198  				}()
   199  			}
   200  			hdb.mu.Unlock()
   201  
   202  			// Block while waiting for an opening in the scan pool.
   203  			hdb.staticLog.Debugf("Sending host %v for scan, %v hosts remain", entry.PublicKey.String(), scansRemaining)
   204  			select {
   205  			case scanPool <- entry:
   206  				continue
   207  			case <-hdb.tg.StopChan():
   208  				return
   209  			}
   210  		}
   211  	}()
   212  }
   213  
   214  // IsMalicious asks the hostdb whether it believes a host is malicious.
   215  // Right now this only checks for a bad score, but the check may be
   216  // extended in the future.
   217  func (hdb *HostDB) IsMalicious(entry skymodules.HostDBEntry) (bool, error) {
   218  	sb, err := hdb.managedScoreBreakdown(entry, false, false, false)
   219  	if err != nil {
   220  		return false, err
   221  	}
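        	// Hosts whose score has collapsed to 1 (or below) are considered
        	// malicious for now.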
   222  	return sb.Score.Cmp(types.NewCurrency64(1)) <= 0, nil
   223  }
   224  
   225  // updateEntry updates an entry in the hostdb after a scan has taken place.
   226  //
   227  // CAUTION: This function will automatically add multiple entries to a new host
   228  // to give that host some base uptime. This makes this function co-dependent
   229  // with the host weight functions. Adjustments to the host weight functions
   230  // need to keep this function in mind, and vice-versa.
   231  func (hdb *HostDB) updateEntry(entry skymodules.HostDBEntry, netErr error) {
   232  	// If the scan failed because we don't have Internet access, toss out this update.
   233  	if netErr != nil && !hdb.staticGateway.Online() {
   234  		return
   235  	}
   236  
   237  	// Grab the host from the host tree, and update it with the new settings.
   238  	newEntry, exists := hdb.staticHostTree.Select(entry.PublicKey)
   239  	if exists {
   240  		newEntry.HostExternalSettings = entry.HostExternalSettings
   241  		newEntry.IPNets = entry.IPNets
   242  		newEntry.LastIPNetChange = entry.LastIPNetChange
   243  	} else {
   244  		newEntry = entry
   245  	}
   246  
   247  	// Update the recent interactions with this host.
   248  	//
   249  	// No decay applied because block height is unknown.
   250  	if netErr == nil {
   251  		newEntry.RecentSuccessfulInteractions++
   252  	} else {
   253  		newEntry.RecentFailedInteractions++
   254  	}
   255  
   256  	// Add the datapoints for the scan.
   257  	if len(newEntry.ScanHistory) < 2 {
   258  		// COMPATv1.1.0
   259  		//
   260  		// The hostdb did not always track its block height correctly, meaning
   261  		// that previously the FirstSeen values and the blockHeight values could
   262  		// get out of sync.
   263  		//
   264  		// NOTE: this check used to happen when we loaded the host db from the
   265  		// persistence file, which caused issues with the 'suggestedStartTime' we
   266  		// calculate below because we might have processed a consensus change
   267  		// containing reverted blocks. This made it so the block height was less
   268  		// than 'FirstSeen', which caused 'suggestedStartTime' to be in the
   269  		// future. The end result is that the first two entries are not in
   270  		// incrementing order, causing all subsequent entries to get ignored,
   271  		// which messes with the host's uptime stats.
   272  		if hdb.blockHeight < entry.FirstSeen {
   273  			entry.FirstSeen = hdb.blockHeight
   274  		}
   275  
   276  		// Add two scans to the scan history. Two are needed because the scans
   277  		// are forward looking, but we want this first scan to represent as
   278  		// much as two days of uptime or downtime.
   279  		earliestStartTime := time.Now().Add(time.Hour * 2 * 24 * -1)                                                   // Permit up to two days of starting uptime or downtime.
   280  		suggestedStartTime := time.Now().Add(time.Minute * 10 * time.Duration(hdb.blockHeight-entry.FirstSeen+1) * -1) // Add one to the FirstSeen in case FirstSeen is this block, guarantees incrementing order.
   281  		if suggestedStartTime.Before(earliestStartTime) {
   282  			suggestedStartTime = earliestStartTime
   283  		}
   284  		newEntry.ScanHistory = skymodules.HostDBScans{
   285  			{Timestamp: suggestedStartTime, Success: netErr == nil},
   286  			{Timestamp: time.Now(), Success: netErr == nil},
   287  		}
   288  	} else {
   289  		// Do not add a new timestamp for the scan unless at least
   290  		// scanTimeElapsedRequirement has passed since the previous scan.
   291  		newTimestamp := time.Now()
   292  		prevTimestamp := newEntry.ScanHistory[len(newEntry.ScanHistory)-1].Timestamp
   293  		if newTimestamp.After(prevTimestamp.Add(scanTimeElapsedRequirement)) {
   294  			if newEntry.ScanHistory[len(newEntry.ScanHistory)-1].Success && netErr != nil {
   295  				hdb.staticLog.Printf("Host %v is being downgraded from an online host to an offline host: %v\n", newEntry.PublicKey.String(), netErr)
   296  			}
   297  			newEntry.ScanHistory = append(newEntry.ScanHistory, skymodules.HostDBScan{Timestamp: newTimestamp, Success: netErr == nil})
   298  		}
   299  	}
   300  
   301  	// COMPATv1.6.3
   302  	//
   303  	// The hostdb would reset the entry's 'FirstSeen' value to the block height
   304  	// at that point in time. However, this happened on load, when consensus was
   305  	// potentially unsynced, meaning there might be consensus changes holding
   306  	// reverts that put the hostdb height below the 'FirstSeen' value. This caused
   307  	// initial scans where the history was unsorted from the get-go. We need to
   308  	// alter these historic scans because all subsequent scans get ignored due
   309  	// to the history being unsorted.
   310  	if updated := compatv164SortEntryScans(&newEntry); updated {
   311  		hdb.staticLog.Printf("compatv164SortEntryScans: sorted scan history for host %v\n", newEntry.PublicKey.String())
   312  	}
   313  
   314  	// Check whether any of the recent scans demonstrate uptime. The pruning and
   315  	// compression of the history ensure that there are only relatively recent
   316  	// scans represented.
   317  	var recentUptime bool
   318  	for _, scan := range newEntry.ScanHistory {
   319  		if scan.Success {
   320  			recentUptime = true
   321  		}
   322  	}
   323  
   324  	// If the host has been offline for too long, delete the host from the
   325  	// hostdb. Only delete if there have been enough scans over a long enough
   326  	// period to be confident that the host really is offline for good, and if we
   327  	// don't have any contracts with that host.
   328  	_, haveContractWithHost := hdb.knownContracts[newEntry.PublicKey.String()]
   329  	downPastMaxDowntime := time.Since(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime && !recentUptime
   330  	if !haveContractWithHost && downPastMaxDowntime && len(newEntry.ScanHistory) >= minScans {
   331  		if newEntry.HistoricUptime > 0 {
   332  			hdb.staticLog.Printf("Removing %v with historic uptime from hostdb. Recent downtime timestamp is %v. Hostdb knows about %v contracts.", newEntry.PublicKey.String(), newEntry.ScanHistory[0].Timestamp, len(hdb.knownContracts))
   333  		}
   334  		// Remove the host from the hostdb.
   335  		err := hdb.remove(newEntry.PublicKey)
   336  		if err != nil {
   337  			hdb.staticLog.Println("ERROR: unable to remove host newEntry which has had a ton of downtime:", err)
   338  		}
   339  
   340  		// The function should terminate here as no more interaction is needed
   341  		// with this host.
   342  		return
   343  	}
   344  
   345  	// Compress any old scans into the historic values.
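        	// The oldest scan is credited with the time that elapsed until the next
        	// scan and is then dropped; this repeats while the history holds more than
        	// minScans entries and the oldest entry is older than maxHostDowntime.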
   346  	for len(newEntry.ScanHistory) > minScans && time.Since(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime {
   347  		timePassed := newEntry.ScanHistory[1].Timestamp.Sub(newEntry.ScanHistory[0].Timestamp)
   348  		if newEntry.ScanHistory[0].Success {
   349  			newEntry.HistoricUptime += timePassed
   350  		} else {
   351  			newEntry.HistoricDowntime += timePassed
   352  		}
   353  		newEntry.ScanHistory = newEntry.ScanHistory[1:]
   354  	}
   355  
   356  	// Add the updated entry
   357  	if !exists {
   358  		// Insert into Hosttrees
   359  		err := hdb.insert(newEntry)
   360  		if errors.Contains(err, errHostDomainBlocked) {
   361  			hdb.staticLog.Debugf("Could not insert host %v to the hostdb. Host domain blocked %v\n", newEntry.PublicKey.String(), newEntry.NetAddress)
   362  		} else if err != nil {
   363  			hdb.staticLog.Println("ERROR: unable to insert entry which was thought to be new:", err)
   364  		} else {
   365  			hdb.staticLog.Debugf("Adding host %v to the hostdb. Net error: %v\n", newEntry.PublicKey.String(), netErr)
   366  		}
   367  	} else {
   368  		// Modify hosttrees
   369  		err := hdb.modify(newEntry)
   370  		if errors.Contains(err, errHostDomainBlocked) {
   371  			// Host is blocked, remove from the hosttree
   372  			err = errors.AddContext(hdb.remove(newEntry.PublicKey), "unable to remove blocked host from host tree")
   373  		}
   374  		if err != nil {
   375  			hdb.staticLog.Println("ERROR: unable to modify entry which is thought to exist:", err)
   376  		} else {
   377  			hdb.staticLog.Debugf("Updating host %v in the hostdb. Net error: %v\n", newEntry.PublicKey.String(), netErr)
   378  		}
   379  	}
   380  }
   381  
   382  // staticLookupIPNets returns string representations of the CIDR subnets used by
   383  // the host. In case of an error we return nil and the error. We don't really
   384  // care about the error because we don't update host entries if we are offline
   385  // anyway; if we fail to resolve a hostname, the problem is not on our end.
   386  func (hdb *HostDB) staticLookupIPNets(address modules.NetAddress) (ipNets []string, err error) {
   387  	// Lookup the IP addresses of the host.
   388  	addresses, err := hdb.staticDeps.Resolver().LookupIP(address.Host())
   389  	if err != nil {
   390  		return nil, err
   391  	}
   392  	// Get the subnets of the addresses.
   393  	for _, ip := range addresses {
   394  		// Set the filterRange according to the type of IP address.
   395  		var filterRange int
   396  		if ip.To4() != nil {
   397  			filterRange = hosttree.IPv4FilterRange
   398  		} else {
   399  			filterRange = hosttree.IPv6FilterRange
   400  		}
   401  
   402  		// Get the subnet.
   403  		_, ipnet, err := net.ParseCIDR(fmt.Sprintf("%s/%d", ip.String(), filterRange))
   404  		if err != nil {
   405  			return nil, err
   406  		}
   407  		// Add the subnet to the host.
   408  		ipNets = append(ipNets, ipnet.String())
   409  	}
   410  	return
   411  }
   412  
   413  // managedScanHost will connect to a host, grab its settings, record the result
   414  // for uptime tracking, and update the hostdb's entry for that host.
   415  func (hdb *HostDB) managedScanHost(entry skymodules.HostDBEntry) {
   416  	// Request settings from the queued host entry.
   417  	netAddr := entry.NetAddress
   418  	pubKey := entry.PublicKey
   419  	hdb.staticLog.Debugf("Scanning host %v at %v", pubKey, netAddr)
   420  
   421  	// If we use a custom resolver for testing, we replace the custom domain
   422  	// with 127.0.0.1. Otherwise the scan will fail.
   423  	if hdb.staticDeps.Disrupt("customResolver") {
   424  		port := netAddr.Port()
   425  		netAddr = modules.NetAddress(fmt.Sprintf("127.0.0.1:%s", port))
   426  	}
   427  
   428  	// Resolve the host's used subnets and update the timestamp if they
   429  	// changed. We only update the timestamp if resolving the ipNets was
   430  	// successful.
   431  	ipNets, err := hdb.staticLookupIPNets(entry.NetAddress)
   432  	if err == nil && !equalIPNets(ipNets, entry.IPNets) {
   433  		entry.IPNets = ipNets
   434  		entry.LastIPNetChange = time.Now()
   435  	}
   436  	if err != nil {
   437  		hdb.staticLog.Debugln("managedScanHost: failed to look up IP nets", err)
   438  	}
   439  
   440  	// Update historic interactions of entry if necessary
   441  	hdb.mu.Lock()
   442  	updateHostHistoricInteractions(&entry, hdb.blockHeight)
   443  	hdb.mu.Unlock()
   444  
   445  	var settings modules.HostExternalSettings
   446  	var latency time.Duration
   447  	err = func() error {
   448  		// Disrupt the host scan by returning an error here simulating a failed
   449  		// host interaction.
   450  		if hdb.staticDeps.Disrupt("InterruptHostScan") {
   451  			return errors.New("InterruptHostScan")
   452  		}
   453  
   454  		timeout := hostRequestTimeout
   455  		hdb.mu.RLock()
   456  		if len(hdb.initialScanLatencies) > minScansForSpeedup {
   457  			build.Critical("len(initialScanLatencies) should never be greater than minScansForSpeedup")
   458  		}
   459  		if !hdb.initialScanComplete && len(hdb.initialScanLatencies) == minScansForSpeedup {
   460  			// During an initial scan, once we have recorded minScansForSpeedup
   461  			// successful scan latencies in initialScanLatencies, we use
   462  			// scanSpeedupMedianMultiplier*median(initialScanLatencies) as the new
   463  			// hostRequestTimeout to speed up the scanning process.
   464  			timeout = hdb.initialScanLatencies[len(hdb.initialScanLatencies)/2]
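        			// initialScanLatencies was sorted when it reached minScansForSpeedup
        			// entries (see the end of managedScanHost), so the element above is
        			// the median latency.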
   465  			timeout *= scanSpeedupMedianMultiplier
   466  			if hostRequestTimeout < timeout {
   467  				timeout = hostRequestTimeout
   468  			}
   469  		}
   470  		hdb.mu.RUnlock()
   471  
   472  		dialer := &net.Dialer{
   473  			Cancel:  hdb.tg.StopChan(),
   474  			Timeout: timeout,
   475  		}
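        		// Measure the dial latency; successful scans during the initial scan
        		// phase record it in initialScanLatencies to tighten the timeout of
        		// later scans.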
   476  		start := time.Now()
   477  		conn, err := dialer.Dial("tcp", string(netAddr))
   478  		latency = time.Since(start)
   479  		if err != nil {
   480  			return err
   481  		}
   482  		// Create a goroutine that will close the connection if the hostdb shuts
   483  		// down or when this method returns, as signalled by closing the
   484  		// connCloseChan channel.
   485  		connCloseChan := make(chan struct{})
   486  		go func() {
   487  			select {
   488  			case <-hdb.tg.StopChan():
   489  			case <-connCloseChan:
   490  			}
   491  			conn.Close()
   492  		}()
   493  		defer close(connCloseChan)
   494  		conn.SetDeadline(time.Now().Add(hostScanDeadline))
   495  
   496  		// Try to talk to the host using RHP2. If the host does not respond to
   497  		// the RHP2 request, consider the scan a failure.
   498  		s, _, err := modules.NewRenterSession(conn, pubKey)
   499  		if err != nil {
   500  			return errors.AddContext(err, "could not open RHP2 session")
   501  		}
   502  		defer s.WriteRequest(modules.RPCLoopExit, nil) // make sure we close cleanly
   503  		if err := s.WriteRequest(modules.RPCLoopSettings, nil); err != nil {
   504  			return errors.AddContext(err, "could not write the loop settings request in the RHP2 check")
   505  		}
   506  		var resp modules.LoopSettingsResponse
   507  		if err := s.ReadResponse(&resp, maxSettingsLen); err != nil {
   508  			return errors.AddContext(err, "could not read the settings response")
   509  		}
   510  		err = json.Unmarshal(resp.Settings, &settings)
   511  		if err != nil {
   512  			return errors.AddContext(err, "could not unmarshal the settings response")
   513  		}
   514  		// If the host's version is lower than v1.4.12, which is the version
   515  		// at which the following fields were added to the host's external
   516  		// settings, we set these values to their original defaults to
   517  		// ensure these hosts are not penalized by renters running the
   518  		// latest software.
   519  		if build.VersionCmp(settings.Version, "1.4.12") < 0 {
   520  			settings.EphemeralAccountExpiry = modules.CompatV1412DefaultEphemeralAccountExpiry
   521  			settings.MaxEphemeralAccountBalance = modules.CompatV1412DefaultMaxEphemeralAccountBalance
   522  		}
   523  
   524  		// Need to apply the custom resolver to the siamux address.
   525  		siamuxAddr := settings.SiaMuxAddress()
   526  		if hdb.staticDeps.Disrupt("customResolver") {
   527  			port := modules.NetAddress(siamuxAddr).Port()
   528  			siamuxAddr = fmt.Sprintf("127.0.0.1:%s", port)
   529  		}
   530  
   531  		// Try opening a connection to the siamux, this is a very lightweight
   532  		// way of checking that RHP3 is supported.
   533  		_, err = fetchPriceTable(hdb.staticMux, siamuxAddr, timeout, modules.SiaPKToMuxPK(entry.PublicKey))
   534  		if err != nil {
   535  			hdb.staticLog.Debugf("%v siamux ping not successful: %v\n", entry.PublicKey, err)
   536  			return err
   537  		}
   538  		return nil
   539  	}()
   540  	if err != nil {
   541  		hdb.staticLog.Debugf("Scan of host at %v failed: %v", pubKey, err)
   542  	} else {
   543  		hdb.staticLog.Debugf("Scan of host at %v succeeded.", pubKey)
   544  		entry.HostExternalSettings = settings
   545  	}
   546  	success := err == nil
   547  
   548  	hdb.mu.Lock()
   549  	defer hdb.mu.Unlock()
   550  	// We don't want to override the NetAddress during a scan so we need to
   551  	// retrieve the most recent NetAddress from the tree first.
   552  	oldEntry, exists := hdb.staticHostTree.Select(entry.PublicKey)
   553  	if exists {
   554  		entry.NetAddress = oldEntry.NetAddress
   555  	}
   556  	// Update the host tree to have a new entry, including the scan error if
   557  	// there was one. The entry was removed from the scan map when it was dequeued.
   558  	hdb.updateEntry(entry, err)
   559  
   560  	// Add the scan to the initialScanLatencies if it was successful.
   561  	if success && len(hdb.initialScanLatencies) < minScansForSpeedup {
   562  		hdb.initialScanLatencies = append(hdb.initialScanLatencies, latency)
   563  		// If the slice has reached its maximum size we sort it.
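        		// Sorting once here lets the timeout logic above read the median as the
        		// middle element without re-sorting on every scan.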
   564  		if len(hdb.initialScanLatencies) == minScansForSpeedup {
   565  			sort.Slice(hdb.initialScanLatencies, func(i, j int) bool {
   566  				return hdb.initialScanLatencies[i] < hdb.initialScanLatencies[j]
   567  			})
   568  		}
   569  	}
   570  }
   571  
   572  // managedWaitForScans is a helper function that blocks until the hostdb's
   573  // scanList is empty.
   574  func (hdb *HostDB) managedWaitForScans() {
   575  	for {
   576  		hdb.mu.Lock()
   577  		length := len(hdb.scanList)
   578  		hdb.mu.Unlock()
   579  		if length == 0 {
   580  			break
   581  		}
   582  		select {
   583  		case <-hdb.tg.StopChan():
   584  			return
   585  		case <-time.After(scanCheckInterval):
   586  		}
   587  	}
   588  }
   589  
   590  // threadedProbeHosts pulls hosts from the scan pool and runs a scan on them.
   591  func (hdb *HostDB) threadedProbeHosts(scanPool <-chan skymodules.HostDBEntry) {
   592  	for hostEntry := range scanPool {
   593  		// Block until hostdb has internet connectivity.
   594  		for {
   595  			if hdb.staticGateway.Online() {
   596  				break
   597  			}
   598  			select {
   599  			case <-time.After(time.Second * 30):
   600  				continue
   601  			case <-hdb.tg.StopChan():
   602  				return
   603  			}
   604  		}
   605  
   606  		// There appears to be internet connectivity, continue with the
   607  		// scan.
   608  		hdb.managedScanHost(hostEntry)
   609  	}
   610  }
   611  
   612  // threadedScan is an ongoing function which will query the full set of hosts
   613  // every few hours to see who is online and available for uploading.
   614  func (hdb *HostDB) threadedScan() {
   615  	err := hdb.tg.Add()
   616  	if err != nil {
   617  		return
   618  	}
   619  	defer hdb.tg.Done()
   620  
   621  	// Wait until the consensus set is synced. Only then can we be sure that
   622  	// the initial scan covers the whole network.
   623  	for {
   624  		if hdb.managedSynced() {
   625  			break
   626  		}
   627  		select {
   628  		case <-hdb.tg.StopChan():
   629  			return
   630  		case <-time.After(scanCheckInterval):
   631  		}
   632  	}
   633  
   634  	// Block scan when a specific dependency is provided.
   635  	hdb.staticDeps.Disrupt("BlockScan")
   636  
   637  	// Make sure that all hosts have gone through the initial scanning.
   638  	allHosts := hdb.staticHostTree.All()
   639  	hdb.mu.Lock()
   640  	for _, host := range allHosts {
   641  		if len(host.ScanHistory) == 0 {
   642  			hdb.queueScan(host)
   643  		}
   644  	}
   645  	hdb.mu.Unlock()
   646  
   647  	// Do nothing until the scan list is empty. If there are hosts in the scan
   648  	// list, other threads are ensuring they all get scanned.
   649  	hdb.managedWaitForScans()
   650  
   651  	hdb.mu.Lock()
   652  	// Set the flag to indicate that the initial scan is complete.
   653  	hdb.initialScanComplete = true
   654  	// Copy the known contracts to avoid having to lock the hdb later.
   655  	knownContracts := make(map[string]contractInfo)
   656  	for k, c := range hdb.knownContracts {
   657  		knownContracts[k] = c
   658  	}
   659  	hdb.mu.Unlock()
   660  
   661  	for {
   662  		// Before we start a new iteration of the scanloop we check if the
   663  		// txnFees need to be updated.
   664  		hdb.managedUpdateTxnFees()
   665  
   666  		// Set up a scan for the hostCheckupQuantity most valuable hosts in the
   667  		// hostdb. Hosts that fail their scans will be docked significantly,
   668  		// pushing them further back in the hierarchy, ensuring that for the
   669  		// most part only online hosts are getting scanned unless there are
   670  		// fewer than hostCheckupQuantity of them.
   671  
   672  		// Grab a set of hosts to scan: include online hosts, offline hosts, and
   673  		// hosts we have contracts with (known hosts) to get high diversity.
   674  		var onlineHosts, offlineHosts, knownHosts []skymodules.HostDBEntry
   675  		allHosts := hdb.staticHostTree.All()
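        		// Walk allHosts from the back, stopping early once enough online and
        		// offline hosts have been collected and every known (contract) host
        		// has been seen.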
   676  		for i := len(allHosts) - 1; i >= 0; i-- {
   677  			if len(onlineHosts) >= hostCheckupQuantity &&
   678  				len(offlineHosts) >= hostCheckupQuantity &&
   679  				len(knownHosts) == len(knownContracts) {
   680  				break
   681  			}
   682  
   683  			// Figure out if the host is known, online or offline.
   684  			host := allHosts[i]
   685  			online := len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory)-1].Success
   686  			_, known := knownContracts[host.PublicKey.String()]
   687  			if known {
   688  				knownHosts = append(knownHosts, host)
   689  			} else if online && len(onlineHosts) < hostCheckupQuantity {
   690  				onlineHosts = append(onlineHosts, host)
   691  			} else if !online && len(offlineHosts) < hostCheckupQuantity {
   692  				offlineHosts = append(offlineHosts, host)
   693  			}
   694  		}
   695  
   696  		// Queue the scans for each host.
   697  		hdb.staticLog.Println("Performing scan on", len(onlineHosts), "online hosts and", len(offlineHosts), "offline hosts and", len(knownHosts), "known hosts.")
   698  		hdb.mu.Lock()
   699  		for _, host := range knownHosts {
   700  			hdb.queueScan(host)
   701  		}
   702  		for _, host := range onlineHosts {
   703  			hdb.queueScan(host)
   704  		}
   705  		for _, host := range offlineHosts {
   706  			hdb.queueScan(host)
   707  		}
   708  		hdb.mu.Unlock()
   709  
   710  		// Sleep for a random amount of time before doing another round of
   711  		// scanning. The minimums and maximums keep the scan time reasonable,
   712  		// while the randomness prevents the scanning from always happening at
   713  		// the same time of day or week.
   714  		sleepRange := uint64(maxScanSleep - minScanSleep)
   715  		sleepTime := minScanSleep + time.Duration(fastrand.Uint64n(sleepRange))
   716  
   717  		// Sleep until it's time for the next scan cycle.
   718  		select {
   719  		case <-hdb.tg.StopChan():
   720  			return
   721  		case <-time.After(sleepTime):
   722  		}
   723  	}
   724  }
   725  
   726  // compatv164SortEntryScans holds some compat code that ensures the scans in an
   727  // entry's scan history are sorted and not in the future. It returns true if the
   728  // scan history had to be altered.
   729  func compatv164SortEntryScans(entry *skymodules.HostDBEntry) bool {
   730  	// escape early if there's no scan history
   731  	scans := entry.ScanHistory
   732  	if len(scans) == 0 {
   733  		return false
   734  	}
   735  
   736  	// escape early if the scans are sorted and not in the future
   737  	sorted := sort.SliceIsSorted(scans, func(i, j int) bool { return scans[i].Timestamp.Before(scans[j].Timestamp) })
   738  	inTheFuture := scans[len(scans)-1].Timestamp.After(time.Now())
   739  	if sorted && !inTheFuture {
   740  		return false
   741  	}
   742  
   743  	// make sure the latest scan is not in the future
   744  	if scans[len(scans)-1].Timestamp.After(time.Now()) {
   745  		scans[len(scans)-1].Timestamp = time.Now()
   746  	}
   747  
   748  	// traverse the scans in reverse and ensure all entries are sorted; when we
   749  	// encounter an unsorted entry we update it to have occurred one block time
   750  	// (10 minutes) earlier than the scan that follows it
   751  	for i := len(scans) - 1; i > 0; i-- {
   752  		if !scans[i].Timestamp.After(scans[i-1].Timestamp) {
   753  			scans[i-1].Timestamp = scans[i].Timestamp.Add(time.Minute * 10 * -1)
   754  		}
   755  	}
   756  
   757  	return true
   758  }
   759  
   760  // fetchPriceTable fetches a price table from a host without paying. This means
   761  // the price table is only useful for scoring the host and can't be used to pay
   762  // for RPCs. This uses an ephemeral stream, a special type of stream that doesn't
   763  // leak TCP connections; otherwise we would end up with one TCP connection for
   764  // every host in the network after scanning the whole network.
   765  func fetchPriceTable(siamux *siamux.SiaMux, hostAddr string, timeout time.Duration, hpk mux.ED25519PublicKey) (_ *modules.RPCPriceTable, err error) {
   766  	stream, err := siamux.NewEphemeralStream(modules.HostSiaMuxSubscriberName, hostAddr, timeout, hpk)
   767  	if err != nil {
   768  		return nil, errors.AddContext(err, "failed to create ephemeral stream")
   769  	}
   770  	defer func() {
   771  		err = errors.Compose(err, stream.Close())
   772  	}()
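        	// The deferred Close is composed into the named return value so that a
        	// failed Close still surfaces to the caller.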
   773  
   774  	// set a deadline on the stream.
   775  	err = stream.SetDeadline(time.Now().Add(hostScanDeadline))
   776  	if err != nil {
   777  		return nil, errors.AddContext(err, "failed to set stream deadline")
   778  	}
   779  
   780  	// initiate the RPC
   781  	err = modules.RPCWrite(stream, modules.RPCUpdatePriceTable)
   782  	if err != nil {
   783  		return nil, errors.AddContext(err, "failed to write price table RPC specifier")
   784  	}
   785  
   786  	// receive the price table response
   787  	var update modules.RPCUpdatePriceTableResponse
   788  	err = modules.RPCRead(stream, &update)
   789  	if err != nil {
   790  		return nil, errors.AddContext(err, "failed to read price table response")
   791  	}
   792  
   793  	// unmarshal the price table
   794  	var pt modules.RPCPriceTable
   795  	err = json.Unmarshal(update.PriceTableJSON, &pt)
   796  	if err != nil {
   797  		return nil, errors.AddContext(err, "failed to unmarshal price table")
   798  	}
   799  	return &pt, nil
   800  }