github.com/billybanfield/evergreen@v0.0.0-20170525200750-eeee692790f7/scheduler/duration_based_host_allocator.go

package scheduler

import (
	"math"
	"sort"
	"time"

	"github.com/evergreen-ci/evergreen"
	"github.com/evergreen-ci/evergreen/cloud/providers"
	"github.com/evergreen-ci/evergreen/cloud/providers/static"
	"github.com/evergreen-ci/evergreen/model"
	"github.com/evergreen-ci/evergreen/model/distro"
	"github.com/evergreen-ci/evergreen/model/host"
	"github.com/evergreen-ci/evergreen/model/task"
	"github.com/evergreen-ci/evergreen/util"
	"github.com/mitchellh/mapstructure"
	"github.com/mongodb/grip"
	"github.com/pkg/errors"
)

const (
	// maximum turnaround we want to maintain for all hosts for a given distro
	MaxDurationPerDistroHost = time.Hour

	// for distro queues with tasks that appear on other queues, this constant
	// indicates the fraction of the total duration of shared tasks that we want
	// to account for when alternate distros are unable to satisfy the
	// turnaround requirement as determined by MaxDurationPerDistroHost
	SharedTasksAllocationProportion = 0.8
)

// DistroScheduleData contains bookkeeping data that is used to determine
// whether or not to allocate more hosts for a distro
type DistroScheduleData struct {

	// indicates the total number of existing hosts for this distro
	numExistingHosts int

	// indicates the nominal number of new hosts to spin up for this distro
	nominalNumNewHosts int

	// indicates the maximum number of hosts allowed for this distro
	poolSize int

	// indicates the number of tasks in this distro's queue
	taskQueueLength int

	// indicates the number of free hosts this distro currently has
	numFreeHosts int

	// indicates the total number of seconds (based on the expected running
	// duration) that tasks in this distro's queue that also appear on other
	// distros' queues will take to run. It is a map of distro name ->
	// cumulative expected duration of the task queue items this distro shares
	// with the keyed distro, specified in seconds
	sharedTasksDuration map[string]float64

	// indicates the total number of seconds (based on the expected running
	// duration) for tasks currently running on hosts of this distro
	runningTasksDuration float64

	// indicates the total number of seconds (based on the expected running
	// duration) for all tasks currently running on hosts of this distro in
	// addition to the total expected duration of all scheduled tasks on it
	totalTasksDuration float64
}

// ScheduledDistroTasksData contains data that is used to compute the expected
// duration of tasks within a distro's queue
type ScheduledDistroTasksData struct {

	// all tasks in this distro's queue
	taskQueueItems []model.TaskQueueItem

	// all tasks that have been previously accounted for in other distros
	tasksAccountedFor map[string]bool

	// a map of task id -> all the distros that task can run on
	taskRunDistros map[string][]string

	// the id of the distro whose task queue this data represents
	currentDistroId string
}

// DurationBasedHostAllocator computes the total time to completion of tasks
// running - per distro - and then uses that as a heuristic in determining
// how many new hosts to spin up
type DurationBasedHostAllocator struct{}

// helper type to sort distros by the number of static hosts they have
type sortableDistroByNumStaticHost struct {
	distros  []distro.Distro
	settings *evergreen.Settings
}

// NewHostsNeeded decides if new hosts are needed for a
// distro while taking the duration of running/scheduled tasks into
// consideration. Returns a map of distro to number of hosts to spawn.
func (self *DurationBasedHostAllocator) NewHostsNeeded(
	hostAllocatorData HostAllocatorData, settings *evergreen.Settings) (newHostsNeeded map[string]int,
	err error) {

	queueDistros := make([]distro.Distro, 0,
		len(hostAllocatorData.taskQueueItems))

	// Sanity check to ensure that we have a distro object for each item in the
	// task queue. Also pulls out the distros we need for sorting.
	for distroId := range hostAllocatorData.taskQueueItems {
		distro, ok := hostAllocatorData.distros[distroId]
		if !ok {
			return nil, errors.Errorf("No distro info available for distro %v",
				distroId)
		}
		if distro.Id != distroId {
			return nil, errors.Errorf("Bad mapping between task queue distro "+
				"name and host allocator distro data: %v != %v", distro.Id,
				distroId)
		}
		queueDistros = append(queueDistros, distro)
	}

	// Sort the distros by the number of static hosts available. Why? If we
	// have tasks that can run on, say, two distros, one with static hosts and
	// the other without, we want to spin up new machines for the latter only
	// if the former is unable to satisfy the turnaround requirement - as
	// determined by MaxDurationPerDistroHost
	distros := sortDistrosByNumStaticHosts(queueDistros, settings)

	// for all distros, this maintains a mapping of distro name -> the number
	// of new hosts needed for that distro
	newHostsNeeded = make(map[string]int)

	// across all distros, this maintains a mapping of task id -> bool - a
	// boolean that indicates if we've accounted for this task from some
	// distro's queue
	tasksAccountedFor := make(map[string]bool)

	// for each distro, this holds the pertinent data that was used in
	// computing the nominal number of new hosts needed for that distro
	distroScheduleData := make(map[string]DistroScheduleData)

	// now, for each distro, see if we need to spin up any new hosts
	for _, d := range distros {
		newHostsNeeded[d.Id], err = self.
			numNewHostsForDistro(&hostAllocatorData, d, tasksAccountedFor,
				distroScheduleData, settings)
		if err != nil {
			grip.Errorln("Error getting num hosts for distro:", err)
			return nil, err
		}
	}

	grip.Infof("Reporting hosts needed: %#v", newHostsNeeded)
	return newHostsNeeded, nil
}
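
// An illustrative sketch of how this allocator might be driven (hedged: the
// real wiring lives in the surrounding scheduler package, and the distro ids
// below are hypothetical). All maps in HostAllocatorData are keyed by distro
// id, and the result maps each queued distro to the number of hosts to spawn:
//
//	allocator := &DurationBasedHostAllocator{}
//	newHosts, err := allocator.NewHostsNeeded(hostAllocatorData, settings)
//	if err == nil {
//		// e.g. map[string]int{"linux-64": 2, "windows-64": 0}
//		grip.Infof("would spawn: %v", newHosts)
//	}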

// computeScheduledTasksDuration returns the total estimated duration of all
// tasks scheduled to be run in a given task queue
func computeScheduledTasksDuration(
	scheduledDistroTasksData *ScheduledDistroTasksData) (
	scheduledTasksDuration float64, sharedTasksDuration map[string]float64) {

	taskQueueItems := scheduledDistroTasksData.taskQueueItems
	taskRunDistros := scheduledDistroTasksData.taskRunDistros
	tasksAccountedFor := scheduledDistroTasksData.tasksAccountedFor
	currentDistroId := scheduledDistroTasksData.currentDistroId
	sharedTasksDuration = make(map[string]float64)

	// compute the total expected duration for tasks in this queue
	for _, taskQueueItem := range taskQueueItems {
		if !tasksAccountedFor[taskQueueItem.Id] {
			scheduledTasksDuration += taskQueueItem.ExpectedDuration.Seconds()
			tasksAccountedFor[taskQueueItem.Id] = true
		}

		// if the task can be run on multiple distros - including this one - add
		// it to the total duration of 'shared tasks' for the distro and all
		// other distros it can be run on
		distroIds, ok := taskRunDistros[taskQueueItem.Id]
		if ok && util.SliceContains(distroIds, currentDistroId) {
			for _, distroId := range distroIds {
				sharedTasksDuration[distroId] +=
					taskQueueItem.ExpectedDuration.Seconds()
			}
		}
	}
	return
}
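
// exampleScheduledTasksDuration is an illustrative sketch and is not called by
// the allocator; the task and distro ids are hypothetical. Task "t1" is queued
// only on linux-64 while "t2" can also run on windows-64, so the scheduled
// duration comes out to 2400 seconds and the shared map credits t2's 600
// seconds to both distros.
func exampleScheduledTasksDuration() (float64, map[string]float64) {
	data := &ScheduledDistroTasksData{
		taskQueueItems: []model.TaskQueueItem{
			{Id: "t1", ExpectedDuration: 30 * time.Minute},
			{Id: "t2", ExpectedDuration: 10 * time.Minute},
		},
		tasksAccountedFor: map[string]bool{},
		taskRunDistros:    map[string][]string{"t2": {"linux-64", "windows-64"}},
		currentDistroId:   "linux-64",
	}
	return computeScheduledTasksDuration(data)
}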

// computeRunningTasksDuration returns the estimated time to completion of all
// currently running tasks for a given distro given its hosts
func computeRunningTasksDuration(existingDistroHosts []host.Host,
	taskDurations model.ProjectTaskDurations) (runningTasksDuration float64,
	err error) {

	runningTaskIds := []string{}

	for _, existingDistroHost := range existingDistroHosts {
		if existingDistroHost.RunningTask != "" {
			runningTaskIds = append(runningTaskIds,
				existingDistroHost.RunningTask)
		}
	}

	// if this distro's hosts are all free, return immediately
	if len(runningTaskIds) == 0 {
		return
	}

	runningTasksMap := make(map[string]task.Task)
	runningTasks, err := task.Find(task.ByIds(runningTaskIds))
	if err != nil {
		return runningTasksDuration, err
	}

	// build a map of task id => task
	for _, runningTask := range runningTasks {
		runningTasksMap[runningTask.Id] = runningTask
	}

	// compute the total time to completion for running tasks
	for _, runningTaskId := range runningTaskIds {
		runningTask, ok := runningTasksMap[runningTaskId]
		if !ok {
			return runningTasksDuration, errors.Errorf("Unable to find running "+
				"task with _id %v", runningTaskId)
		}
		expectedDuration := model.GetTaskExpectedDuration(runningTask,
			taskDurations)
		elapsedTime := time.Since(runningTask.StartTime)
		if elapsedTime > expectedDuration {
			// probably an outlier; or an unknown data point
			continue
		}
		runningTasksDuration += expectedDuration.Seconds() -
			elapsedTime.Seconds()
	}
	return
}
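
// As an illustrative sketch (hypothetical numbers, not part of the original
// source): a host whose running task started 20 minutes ago with an expected
// duration of 50 minutes contributes 30 minutes of remaining work, while a
// task that is already past its expected duration is treated as an outlier
// and contributes nothing. Two such hosts would therefore yield a
// runningTasksDuration of 1800 seconds.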

// computeDurationBasedNumNewHosts returns the number of new hosts needed based
// on a heuristic that utilizes the total duration of currently running and
// scheduled tasks - and based on a maximum duration of a task per distro host -
// a turnaround cap on all outstanding and running tasks in the system
func computeDurationBasedNumNewHosts(scheduledTasksDuration,
	runningTasksDuration, numExistingDistroHosts float64,
	maxDurationPerHost time.Duration) (numNewHosts int) {

	// total duration of scheduled and currently running tasks
	totalDistroTasksDuration := scheduledTasksDuration +
		runningTasksDuration

	// number of hosts needed to meet the duration based turnaround requirement
	numHostsForTurnaroundRequirement := totalDistroTasksDuration /
		maxDurationPerHost.Seconds()

	// floating point precision number of new hosts needed
	durationBasedNumNewHostsNeeded := numHostsForTurnaroundRequirement -
		numExistingDistroHosts

	// duration based number of new hosts needed
	numNewHosts = int(math.Ceil(durationBasedNumNewHostsNeeded))

	// return 0 if numNewHosts is less than 0
	if numNewHosts < 0 {
		numNewHosts = 0
	}
	return
}
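
// exampleDurationBasedNumNewHosts is an illustrative sketch and is not called
// by the allocator; the workload numbers are hypothetical. Six hours of queued
// work plus one hour remaining on running tasks is seven host-hours; with the
// one-hour MaxDurationPerDistroHost cap that calls for seven hosts, and since
// two already exist the heuristic returns ceil(7 - 2) = 5.
func exampleDurationBasedNumNewHosts() int {
	scheduled := (6 * time.Hour).Seconds() // 21600 seconds of queued work
	running := time.Hour.Seconds()         // 3600 seconds left on running tasks
	return computeDurationBasedNumNewHosts(scheduled, running, 2, MaxDurationPerDistroHost)
}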

// fetchExcessSharedDuration returns a slice of shared task durations (in
// seconds). Given a distro and a map of DistroScheduleData, it traverses the
// map looking for alternate distros that are unable to meet the
// maxDurationPerHost (turnaround) specification for shared tasks
func fetchExcessSharedDuration(distroScheduleData map[string]DistroScheduleData,
	distro string,
	maxDurationPerHost time.Duration) (sharedTasksDurationTimes []float64) {

	distroData := distroScheduleData[distro]

	// if we have more tasks to run than we have existing hosts and at least one
	// alternate distro cannot run all its shared scheduled and running tasks
	// within the maxDurationPerHost period, we need some more hosts for this distro
	for sharedDistro, sharedDuration := range distroData.sharedTasksDuration {
		if distro == sharedDistro {
			continue
		}

		alternateDistroScheduleData := distroScheduleData[sharedDistro]

		durationPerHost := alternateDistroScheduleData.totalTasksDuration /
			(float64(alternateDistroScheduleData.numExistingHosts) +
				float64(alternateDistroScheduleData.nominalNumNewHosts))

		// if this alternate distro is unable to meet the turnaround
		// requirement, append its shared tasks duration to the returned slice
		if durationPerHost > maxDurationPerHost.Seconds() {
			sharedTasksDurationTimes = append(sharedTasksDurationTimes,
				sharedDuration)
		}
	}
	return
}
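
// exampleFetchExcessSharedDuration is an illustrative sketch and is not called
// by the allocator; the distro ids and durations are hypothetical. windows-64
// shares 1200 seconds of work with linux-64 but would need two hours per host
// to drain its own queue, so that shared duration is reported back to
// linux-64 as excess and the call returns []float64{1200}.
func exampleFetchExcessSharedDuration() []float64 {
	data := map[string]DistroScheduleData{
		"linux-64": {
			sharedTasksDuration: map[string]float64{"windows-64": 1200},
		},
		"windows-64": {
			numExistingHosts:   1,
			nominalNumNewHosts: 0,
			totalTasksDuration: (2 * time.Hour).Seconds(),
		},
	}
	return fetchExcessSharedDuration(data, "linux-64", MaxDurationPerDistroHost)
}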

// orderedScheduleNumNewHosts returns the final number of new hosts to spin up
// for this distro. It uses the distroScheduleData map to determine if tasks
// already accounted for - tasks that appear in this distro's queue but were
// accounted for earlier because they also appeared in a previously processed
// distro's queue - are too much for other distro hosts to handle. In
// particular, it considers the nominal number of new hosts needed (based on
// the duration estimate) - and then revises this number if needed. This is
// necessary since we use a predefined distro order when we determine
// scheduling (while maintaining a map of all queue items we've seen) and allow
// for a task to run on one or more distros. Essentially, it is meant to
// mitigate the pathological situation where no new hosts are spun up for a
// later processed distro (even when needed) as items on the later distro's
// queues are considered already 'accounted for'.
func orderedScheduleNumNewHosts(
	distroScheduleData map[string]DistroScheduleData,
	distro string, maxDurationPerHost time.Duration,
	sharedTasksAllocationProportion float64) int {

	// examine the current distro's schedule data
	distroData := distroScheduleData[distro]

	// if we're already spinning up new hosts for this distro, no revision is
	// needed, so we return the nominal number of new hosts
	if distroData.nominalNumNewHosts != 0 {
		return distroData.nominalNumNewHosts
	}

	// if the current distro does not share tasks with any other distro,
	// return 0
	if distroData.sharedTasksDuration == nil {
		return 0
	}

	// if the current distro cannot spin up any more hosts, return 0
	if distroData.poolSize <= distroData.numExistingHosts {
		return 0
	}

	// for distros that share task queue items with this distro, find out if
	// any of them is unable to satisfy the turnaround requirement as
	// determined by maxDurationPerHost
	sharedTasksDurationTimes := fetchExcessSharedDuration(distroScheduleData,
		distro, maxDurationPerHost)

	// if all alternate distros can meet the turnaround requirements, return 0
	if len(sharedTasksDurationTimes) == 0 {
		return 0
	}

	// if we get here, then it means we need more hosts and don't have any new
	// or pending hosts to handle outstanding tasks for this distro - within the
	// maxDurationPerHost threshold
	sort.Float64s(sharedTasksDurationTimes)

	// we are most interested in the alternate distro with which we have the
	// largest sum total of shared tasks duration
	sharedTasksDuration := sharedTasksDurationTimes[len(
		sharedTasksDurationTimes)-1]

	// we utilize a percentage of the total duration of all 'shared tasks' we
	// want to incorporate in the revised duration based number of new hosts
	// estimate calculation. this percentage is specified by
	// sharedTasksAllocationProportion.
	// Note that this is a subset of the duration of the number of scheduled
	// tasks - specifically, one that only considers shared tasks and ignores
	// tasks that have been exclusively scheduled on this distro alone
	scheduledTasksDuration := distroData.runningTasksDuration +
		sharedTasksDuration

	// in order to conserve money, we take only a fraction of the shared
	// duration in computing the total shared duration
	scheduledTasksDuration *= sharedTasksAllocationProportion

	// make a second call to compute the number of new hosts needed with a
	// revised duration of scheduled tasks
	durationBasedNumNewHosts := computeDurationBasedNumNewHosts(
		scheduledTasksDuration, distroData.runningTasksDuration,
		float64(distroData.numExistingHosts), maxDurationPerHost)

	return numNewDistroHosts(distroData.poolSize, distroData.numExistingHosts,
		distroData.numFreeHosts, durationBasedNumNewHosts,
		distroData.taskQueueLength)
}
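
// As an illustrative walkthrough (hypothetical numbers, not part of the
// original source): suppose linux-64 was nominally assigned zero new hosts
// because all of its queue items had already been accounted for on
// windows-64, yet windows-64 cannot drain those shared tasks within
// MaxDurationPerDistroHost. With 1200 seconds of excess shared work, no
// running work, and SharedTasksAllocationProportion of 0.8, the revised
// scheduled duration is 960 seconds; for a distro with no existing hosts that
// rounds up to one new host, which numNewDistroHosts then caps by pool size,
// queue length, and free hosts.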

// numNewDistroHosts computes the number of new hosts needed as allowed by
// poolSize. if the duration based estimate (durNewHosts) is too large, e.g.
// when there's a small number of very long running tasks, utilize the deficit
// of available hosts vs. tasks to be run
func numNewDistroHosts(poolSize, numExistingHosts, numFreeHosts, durNewHosts,
	taskQueueLength int) (numNewHosts int) {

	numNewHosts = util.Min(
		// the maximum number of new hosts we're allowed to spin up
		poolSize-numExistingHosts,

		// the duration based estimate for the number of new hosts needed
		durNewHosts,

		// the deficit of available hosts vs. tasks to be run
		taskQueueLength-numFreeHosts,
	)

	// cap to zero as lower bound
	if numNewHosts < 0 {
		numNewHosts = 0
	}
	return
}
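
// exampleNumNewDistroHosts is an illustrative sketch and is not called by the
// allocator; the numbers are hypothetical. Even though the duration based
// estimate asks for ten hosts, the request is capped by both the remaining
// pool capacity (20 - 15 = 5) and the host deficit (8 queued tasks - 2 free
// hosts = 6), so min(5, 10, 6) = 5 hosts are requested.
func exampleNumNewDistroHosts() int {
	return numNewDistroHosts(20, 15, 2, 10, 8)
}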

// numNewHostsForDistro determines how many new hosts should be spun up for an
// individual distro.
func (self *DurationBasedHostAllocator) numNewHostsForDistro(
	hostAllocatorData *HostAllocatorData, distro distro.Distro,
	tasksAccountedFor map[string]bool,
	distroScheduleData map[string]DistroScheduleData, settings *evergreen.Settings) (numNewHosts int,
	err error) {

	projectTaskDurations := hostAllocatorData.projectTaskDurations
	existingDistroHosts := hostAllocatorData.existingDistroHosts[distro.Id]
	taskQueueItems := hostAllocatorData.taskQueueItems[distro.Id]
	taskRunDistros := hostAllocatorData.taskRunDistros

	// determine how many free hosts we have
	numFreeHosts := 0
	for _, existingDistroHost := range existingDistroHosts {
		if existingDistroHost.RunningTask == "" {
			numFreeHosts++
		}
	}

	// determine the total remaining running time of all
	// tasks currently running on the hosts for this distro
	runningTasksDuration, err := computeRunningTasksDuration(
		existingDistroHosts, projectTaskDurations)

	if err != nil {
		return numNewHosts, err
	}

	// construct the data needed by computeScheduledTasksDuration
	scheduledDistroTasksData := &ScheduledDistroTasksData{
		taskQueueItems:    taskQueueItems,
		tasksAccountedFor: tasksAccountedFor,
		taskRunDistros:    taskRunDistros,
		currentDistroId:   distro.Id,
	}

	// determine the total expected running time of all scheduled
	// tasks for this distro
	scheduledTasksDuration, sharedTasksDuration :=
		computeScheduledTasksDuration(scheduledDistroTasksData)

	// find the number of new hosts needed based on the total estimated
	// duration for all outstanding and in-flight tasks for this distro
	durationBasedNumNewHosts := computeDurationBasedNumNewHosts(
		scheduledTasksDuration, runningTasksDuration,
		float64(len(existingDistroHosts)), MaxDurationPerDistroHost)

	// revise the new host estimate based on the cap of the number of new hosts
	// and the number of free hosts
	numNewHosts = numNewDistroHosts(distro.PoolSize, len(existingDistroHosts),
		numFreeHosts, durationBasedNumNewHosts, len(taskQueueItems))

	// create an entry for this distro in the scheduling map
	distroScheduleData[distro.Id] = DistroScheduleData{
		nominalNumNewHosts:   numNewHosts,
		numFreeHosts:         numFreeHosts,
		poolSize:             distro.PoolSize,
		taskQueueLength:      len(taskQueueItems),
		sharedTasksDuration:  sharedTasksDuration,
		runningTasksDuration: runningTasksDuration,
		numExistingHosts:     len(existingDistroHosts),
		totalTasksDuration:   scheduledTasksDuration + runningTasksDuration,
	}

	cloudManager, err := providers.GetCloudManager(distro.Provider, settings)
	if err != nil {
		err = errors.Wrapf(err, "Couldn't get cloud manager for %s (%s)",
			distro.Provider, distro.Id)
		grip.Error(err)
		return 0, err
	}

	can, err := cloudManager.CanSpawn()
	if err != nil {
		err = errors.Wrapf(err, "Problem checking if '%v' provider can spawn hosts",
			distro.Provider)
		grip.Error(err)
		// note: the error is logged but not propagated; allocation proceeds
		// as though this provider cannot spawn hosts
		return 0, nil
	}
	if !can {
		return 0, nil
	}

	// revise the nominal number of new hosts if needed
	numNewHosts = orderedScheduleNumNewHosts(distroScheduleData, distro.Id,
		MaxDurationPerDistroHost, SharedTasksAllocationProportion)

	grip.Infof("Spawning %d additional hosts for %s - currently at %d existing hosts (%d free)",
		numNewHosts, distro.Id, len(existingDistroHosts), numFreeHosts)
	grip.Infof("Total estimated time to process all '%s' scheduled tasks is %s; %d running "+
		"tasks at %s, %d pending tasks at %s (shared tasks duration map: %v)",
		distro.Id,
		time.Duration(scheduledTasksDuration+runningTasksDuration)*time.Second,
		len(existingDistroHosts)-numFreeHosts,
		time.Duration(runningTasksDuration)*time.Second,
		len(taskQueueItems),
		time.Duration(scheduledTasksDuration)*time.Second,
		sharedTasksDuration)

	return numNewHosts, nil
}
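
// As an illustrative end-to-end sketch (hypothetical numbers, not part of the
// original source): a distro with three hosts, one of them free, five queued
// tasks totalling 2.5 hours, and one hour left on running tasks has 3.5
// host-hours of work; the duration based estimate is ceil(3.5 - 3) = 1, which
// numNewDistroHosts then bounds by the pool headroom and by the deficit of
// five queued tasks against one free host, so a single new host is requested.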

// sortDistrosByNumStaticHosts returns a sorted slice of distros where the
// distro with the greatest number of static hosts is first - at index position 0
func sortDistrosByNumStaticHosts(distros []distro.Distro, settings *evergreen.Settings) []distro.Distro {
	sortableDistroObj := &sortableDistroByNumStaticHost{distros, settings}
	sort.Sort(sortableDistroObj)
	return sortableDistroObj.distros
}

// helpers for sorting the distros by the number of their static hosts
func (sd *sortableDistroByNumStaticHost) Len() int {
	return len(sd.distros)
}

func (sd *sortableDistroByNumStaticHost) Less(i, j int) bool {
	if sd.distros[i].Provider != evergreen.HostTypeStatic &&
		sd.distros[j].Provider != evergreen.HostTypeStatic {
		return false
	}
	if sd.distros[i].Provider == evergreen.HostTypeStatic &&
		sd.distros[j].Provider != evergreen.HostTypeStatic {
		return true
	}
	if sd.distros[i].Provider != evergreen.HostTypeStatic &&
		sd.distros[j].Provider == evergreen.HostTypeStatic {
		return false
	}

	h1 := &static.Settings{}
	h2 := &static.Settings{}

	err := mapstructure.Decode(sd.distros[i].ProviderSettings, h1)
	if err != nil {
		return false
	}

	err = mapstructure.Decode(sd.distros[j].ProviderSettings, h2)
	if err != nil {
		return false
	}

	return len(h1.Hosts) > len(h2.Hosts)
}

func (sd *sortableDistroByNumStaticHost) Swap(i, j int) {
	sd.distros[i], sd.distros[j] =
		sd.distros[j], sd.distros[i]
}
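
// As an illustrative note (hypothetical distro ids): given a static distro
// with three hosts listed in its provider settings, a static distro with one
// host, and a dynamically provisioned distro, sortDistrosByNumStaticHosts
// orders them static-three-hosts, static-one-host, dynamic, so distros backed
// by static hardware are considered before any distro that would require
// spinning up new machines.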