github.com/justinjmoses/evergreen@v0.0.0-20170530173719-1d50e381ff0d/scheduler/duration_based_host_allocator_test.go

package scheduler

import (
	"strconv"
	"testing"
	"time"

	"github.com/evergreen-ci/evergreen"
	"github.com/evergreen-ci/evergreen/db"
	"github.com/evergreen-ci/evergreen/model"
	"github.com/evergreen-ci/evergreen/model/distro"
	"github.com/evergreen-ci/evergreen/model/host"
	"github.com/evergreen-ci/evergreen/model/task"
	"github.com/evergreen-ci/evergreen/testutil"
	"github.com/mongodb/grip"
	. "github.com/smartystreets/goconvey/convey"
)

var hostAllocatorTestConf = testutil.TestConfig()

func init() {
	db.SetGlobalSessionProvider(db.SessionFactoryFromConfig(hostAllocatorTestConf))
	grip.CatchError(grip.SetSender(testutil.SetupTestSender(hostAllocatorTestConf.Scheduler.LogFile)))
}

func TestDurationBasedNewHostsNeeded(t *testing.T) {
	/*
		Note that this is a functional test and its validity relies on the
		values of:
		1. MaxDurationPerDistroHost
		2. SharedTasksAllocationProportion
	*/
	Convey("When calling the duration based NewHostsNeeded...", t, func() {
		taskIds := []string{"t1", "t2", "t3", "t4", "t5", "t6", "t7"}
		distroIds := []string{"d0", "d1", "d2", "d3"}
		hostIds := []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7"}

		expDurs := []time.Duration{
			18 * time.Hour,
			17 * time.Hour,
			16 * time.Hour,
			15 * time.Hour,
			14 * time.Hour,
			13 * time.Hour,
			12 * time.Hour,
		}

		// create a task run distro map such that we have a mix of distros a
		// given set of tasks can run on
		taskRunDistros := map[string][]string{
			taskIds[0]: {distroIds[0], distroIds[1]},
			taskIds[1]: {distroIds[1], distroIds[2]},
			taskIds[2]: {distroIds[0], distroIds[2]},
			taskIds[3]: {distroIds[1], distroIds[2]},
			taskIds[4]: {distroIds[0], distroIds[2]},
		}

		taskDurations := model.ProjectTaskDurations{}

		distroSlice := []distro.Distro{
			{
				Id:       distroIds[0],
				Provider: "static",
				PoolSize: 5,
			},
			{
				Id:       distroIds[1],
				Provider: "ec2",
				PoolSize: 10,
			},
			{
				Id:       distroIds[2],
				Provider: "ec2",
				PoolSize: 12,
			},
		}

		taskQueueItems := []model.TaskQueueItem{
			{Id: taskIds[0], ExpectedDuration: expDurs[0]},
			{Id: taskIds[1], ExpectedDuration: expDurs[1]},
			{Id: taskIds[2], ExpectedDuration: expDurs[2]},
			{Id: taskIds[3], ExpectedDuration: expDurs[3]},
			{Id: taskIds[4], ExpectedDuration: expDurs[4]},
			{Id: taskIds[5], ExpectedDuration: expDurs[5]},
			{Id: taskIds[6], ExpectedDuration: expDurs[6]},
		}

		hosts := [][]host.Host{
			{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
				{Id: hostIds[2]},
			},
			{
				{Id: hostIds[3]},
				{Id: hostIds[4]},
				{Id: hostIds[5]},
			},
			{},
		}

		durationBasedHostAllocator := &DurationBasedHostAllocator{}

		Convey("ensure that the distro schedule data is used to spin "+
			"up new hosts if needed",
			func() {
				hostAllocatorData := HostAllocatorData{
					taskQueueItems: map[string][]model.TaskQueueItem{
						distroIds[0]: taskQueueItems,
						distroIds[1]: taskQueueItems,
						distroIds[2]: taskQueueItems[:4],
					},
					existingDistroHosts: map[string][]host.Host{
						distroIds[0]: hosts[0],
						distroIds[1]: hosts[1],
						distroIds[2]: hosts[2],
					},
					distros: map[string]distro.Distro{
						distroIds[0]: distroSlice[0],
						distroIds[1]: distroSlice[1],
						distroIds[2]: distroSlice[2],
					},
					projectTaskDurations: taskDurations,
					taskRunDistros:       taskRunDistros,
				}

				// integration test of duration based host allocator
				newHostsNeeded, err := durationBasedHostAllocator.
					NewHostsNeeded(hostAllocatorData, hostAllocatorTestConf)

				So(err, ShouldBeNil)
				// only d0, whose hosts are all static, should need zero new hosts
				So(newHostsNeeded[distroIds[0]], ShouldEqual, 0)
				So(newHostsNeeded[distroIds[1]], ShouldNotEqual, 0)
				So(newHostsNeeded[distroIds[2]], ShouldNotEqual, 0)
			})
	})
}

func TestFetchExcessSharedDuration(t *testing.T) {
	Convey("When calling fetchExcessSharedDuration...", t, func() {
		distroOne := "d1"
		distroTwo := "d2"

		Convey("if alternate distros can't handle shared tasks duration "+
			"within the threshold, the shared tasks duration should be "+
			"returned",
			func() {
				distroOneScheduleData := DistroScheduleData{
					numExistingHosts:   2,
					nominalNumNewHosts: 2,
					poolSize:           2,
					taskQueueLength:    2,
					numFreeHosts:       2,
					sharedTasksDuration: map[string]float64{
						distroTwo: 5000,
					},
					runningTasksDuration: 2,
					totalTasksDuration:   2,
				}

				distroTwoScheduleData := DistroScheduleData{
					numExistingHosts:   20,
					nominalNumNewHosts: 0,
					poolSize:           20,
					taskQueueLength:    2,
					numFreeHosts:       2,
					sharedTasksDuration: map[string]float64{
						distroOne: 5000,
					},
					runningTasksDuration: 2,
					totalTasksDuration:   2000,
				}

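				// note: distro two's totalTasksDuration of 2000 seconds spread
				// across its 20 existing hosts works out to roughly 100 seconds
				// per host, which is the figure the comments below refer to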
				maxDurationPerDistroHost := time.Duration(10) * time.Second
				distroScheduleData := map[string]DistroScheduleData{
					distroOne: distroOneScheduleData,
					distroTwo: distroTwoScheduleData,
				}

				// with a max duration per distro host of 10 seconds, and the
				// duration per task on distro two at 100 seconds, we need more
				// hosts for distro one
				sharedDurations := fetchExcessSharedDuration(distroScheduleData,
					distroOne, maxDurationPerDistroHost)

				So(len(sharedDurations), ShouldEqual, 1)

				// the sharedDuration value should equal the value in the
				// alternate distro's map
				So(distroTwoScheduleData.sharedTasksDuration[distroOne],
					ShouldEqual, 5000)
			})

		Convey("if alternate distros can handle shared tasks duration "+
			"within the threshold, no shared tasks duration should be returned",
			func() {
				distroOneScheduleData := DistroScheduleData{
					numExistingHosts:   2,
					nominalNumNewHosts: 2,
					poolSize:           2,
					taskQueueLength:    2,
					numFreeHosts:       2,
					sharedTasksDuration: map[string]float64{
						distroTwo: 5000,
					},
					runningTasksDuration: 2,
					totalTasksDuration:   2,
				}

				distroTwoScheduleData := DistroScheduleData{
					numExistingHosts:   20,
					nominalNumNewHosts: 0,
					poolSize:           20,
					taskQueueLength:    2,
					numFreeHosts:       2,
					sharedTasksDuration: map[string]float64{
						distroOne: 5000,
					},
					runningTasksDuration: 2,
					totalTasksDuration:   2000,
				}

				maxDurationPerDistroHost := time.Duration(100) * time.Second
				distroScheduleData := map[string]DistroScheduleData{
					distroOne: distroOneScheduleData,
					distroTwo: distroTwoScheduleData,
				}

				// with a max duration per distro host of 100 seconds, and the
				// duration per task on distro two at 100 seconds, we don't need
				// any more hosts for distro one
				sharedDurations := fetchExcessSharedDuration(distroScheduleData,
					distroOne, maxDurationPerDistroHost)

				So(len(sharedDurations), ShouldEqual, 0)

				maxDurationPerDistroHost = time.Duration(200) * time.Second
				sharedDurations = fetchExcessSharedDuration(distroScheduleData,
					distroOne, maxDurationPerDistroHost)

				So(len(sharedDurations), ShouldEqual, 0)
			})
	})
}

func TestOrderedScheduleNumNewHosts(t *testing.T) {
	Convey("When calling orderedScheduleNumNewHosts...", t, func() {
		distroOne := "d1"
		distroTwo := "d2"

		Convey("if new hosts are allocated, it should return the number of "+
			"new hosts", func() {

			distroOneScheduleData := DistroScheduleData{
				numExistingHosts:   2,
				nominalNumNewHosts: 3,
				poolSize:           2,
				taskQueueLength:    2,
				numFreeHosts:       2,
				sharedTasksDuration: map[string]float64{
					distroTwo: 5000,
				},
				runningTasksDuration: 2,
				totalTasksDuration:   2,
			}

			distroTwoScheduleData := DistroScheduleData{
				numExistingHosts:   2,
				nominalNumNewHosts: 10,
				poolSize:           2,
				taskQueueLength:    2,
				numFreeHosts:       2,
				sharedTasksDuration: map[string]float64{
					distroOne: 5000,
				},
				runningTasksDuration: 2,
				totalTasksDuration:   2,
			}

			distroScheduleData := map[string]DistroScheduleData{
				distroOne: distroOneScheduleData,
				distroTwo: distroTwoScheduleData,
			}

			So(orderedScheduleNumNewHosts(distroScheduleData, distroOne,
				MaxDurationPerDistroHost, 1.0), ShouldEqual, 3)
			So(orderedScheduleNumNewHosts(distroScheduleData, distroTwo,
				MaxDurationPerDistroHost, 1.0), ShouldEqual, 10)
		})

		Convey("if the distro has no shared tasks, the nominal number of new "+
			"hosts should be returned", func() {
			distroOneScheduleData := DistroScheduleData{
				numExistingHosts:     2,
				nominalNumNewHosts:   0,
				poolSize:             2,
				taskQueueLength:      2,
				numFreeHosts:         2,
				runningTasksDuration: 2,
				totalTasksDuration:   2,
			}

			distroTwoScheduleData := DistroScheduleData{
				numExistingHosts:     2,
				nominalNumNewHosts:   2,
				poolSize:             2,
				taskQueueLength:      2,
				numFreeHosts:         2,
				runningTasksDuration: 2,
				totalTasksDuration:   2,
			}

			distroScheduleData := map[string]DistroScheduleData{
				distroOne: distroOneScheduleData,
				distroTwo: distroTwoScheduleData,
			}
			So(orderedScheduleNumNewHosts(distroScheduleData, distroOne,
				MaxDurationPerDistroHost, 1.0), ShouldEqual, 0)
		})

		Convey("if the distro's max hosts is greater than the number of "+
			"existing hosts, 0 should be returned", func() {
			distroOneScheduleData := DistroScheduleData{
				numExistingHosts:     2,
				nominalNumNewHosts:   0,
				poolSize:             22,
				taskQueueLength:      2,
				numFreeHosts:         2,
				runningTasksDuration: 2,
				totalTasksDuration:   2,
			}

			distroTwoScheduleData := DistroScheduleData{
				numExistingHosts:     2,
				nominalNumNewHosts:   2,
				poolSize:             2,
				taskQueueLength:      2,
				numFreeHosts:         2,
				runningTasksDuration: 2,
				totalTasksDuration:   2,
			}

			distroScheduleData := map[string]DistroScheduleData{
				distroOne: distroOneScheduleData,
				distroTwo: distroTwoScheduleData,
			}
			So(orderedScheduleNumNewHosts(distroScheduleData, distroOne,
				MaxDurationPerDistroHost, 1.0), ShouldEqual, 0)
		})

		Convey("if existing alternate distros can handle the tasks, 0 should "+
			"be returned", func() {
			distroOneScheduleData := DistroScheduleData{
				numExistingHosts:   2,
				nominalNumNewHosts: 0,
				poolSize:           12,
				sharedTasksDuration: map[string]float64{
					distroTwo: 5000,
				},
				taskQueueLength:      20,
				numFreeHosts:         2,
				runningTasksDuration: 200,
				totalTasksDuration:   2000,
			}

			distroTwoScheduleData := DistroScheduleData{
				numExistingHosts:   3,
				nominalNumNewHosts: 2,
				poolSize:           12,
				taskQueueLength:    2,
				sharedTasksDuration: map[string]float64{
					distroOne: 500,
				},
				numFreeHosts:         2,
				runningTasksDuration: 200,
				totalTasksDuration:   500,
			}

			maxDurationPerDistroHost := time.Duration(100) * time.Second
			distroScheduleData := map[string]DistroScheduleData{
				distroOne: distroOneScheduleData,
				distroTwo: distroTwoScheduleData,
			}
			So(orderedScheduleNumNewHosts(distroScheduleData, distroOne,
				maxDurationPerDistroHost, 1.0), ShouldEqual, 0)
		})
		Convey("if existing alternate distros cannot handle the tasks, more "+
			"hosts are required, within the pool size", func() {
			distroOneScheduleData := DistroScheduleData{
				numExistingHosts:   5,
				nominalNumNewHosts: 0,
				poolSize:           80,
				taskQueueLength:    40,
				numFreeHosts:       2,
				sharedTasksDuration: map[string]float64{
					distroTwo: 500,
				},
				runningTasksDuration: 100,
				totalTasksDuration:   2,
			}

			distroTwoScheduleData := DistroScheduleData{
				numExistingHosts:   20,
				nominalNumNewHosts: 0,
				poolSize:           20,
				taskQueueLength:    2,
				numFreeHosts:       2,
				sharedTasksDuration: map[string]float64{
					distroOne: 500,
				},
				runningTasksDuration: 200,
				totalTasksDuration:   2000,
			}

			maxDurationPerDistroHost := time.Duration(20) * time.Second
			distroScheduleData := map[string]DistroScheduleData{
				distroOne: distroOneScheduleData,
				distroTwo: distroTwoScheduleData,
			}
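			// distro one's own running duration (100s) plus the 500s of shared
			// work that distro two cannot absorb comes to roughly 600s; at 20s
			// per host that suggests the 30 new hosts expected below, which
			// fits within the pool of 80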
			So(orderedScheduleNumNewHosts(distroScheduleData, distroOne,
				maxDurationPerDistroHost, 1.0), ShouldEqual, 30)
		})
	})
}

func TestSortDistrosByNumStaticHosts(t *testing.T) {
	Convey("When calling sortDistrosByNumStaticHosts...", t, func() {
		Convey("distro hosts should be sorted by the number of static hosts", func() {
			getDistro := func(j int) (d distro.Distro) {
				r := []interface{}{}
				for i := 0; i < j; i++ {
					r = append(r, map[interface{}]interface{}{"name": strconv.Itoa(i)})
				}
				d.Id = strconv.Itoa(j)
				d.Provider = evergreen.HostTypeStatic
				d.ProviderSettings = &map[string]interface{}{"hosts": r}
				return d
			}

			order := []int{0, 2, 1, 4, 6, 3, 5}
			distros := make([]distro.Distro, 0, len(order))
			hosts := make([]string, 0, len(order))
			for i, o := range order {
				distros = append(distros, getDistro(o))
				hosts = append(hosts, strconv.Itoa(i))
			}
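			// getDistro(j) gives each distro an Id equal to its static host
			// count, so sorting by static hosts in descending order should
			// yield Ids "6" down through "0", regardless of insertion order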

			newDistros := sortDistrosByNumStaticHosts(distros, hostAllocatorTestConf)

			So(len(distros), ShouldEqual, len(newDistros))
			So(newDistros[0].Id, ShouldEqual, hosts[6])
			So(newDistros[1].Id, ShouldEqual, hosts[5])
			So(newDistros[2].Id, ShouldEqual, hosts[4])
			So(newDistros[3].Id, ShouldEqual, hosts[3])
			So(newDistros[4].Id, ShouldEqual, hosts[2])
			So(newDistros[5].Id, ShouldEqual, hosts[1])
			So(newDistros[6].Id, ShouldEqual, hosts[0])
		})
	})
}

func TestComputeDurationBasedNumNewHosts(t *testing.T) {
	Convey("When calling computeDurationBasedNumNewHosts...", t, func() {

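		// the expected values below are consistent with the function needing
		// roughly ceil((scheduledTasksDuration+runningTasksDuration)/maxDurationPerHost)
		// total hosts, less the hosts that already exist, never dropping below
		// zero; e.g. (120+120)/20 = 12 hosts needed, minus 10 existing = 2 new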
		Convey("when there's an abundance of hosts, no new hosts are needed",
			func() {
				scheduledTasksDuration := 120.
				runningTasksDuration := 120.
				numExistingHosts := 10.
				maxDurationPerHost := time.Duration(200) * time.Second
				numNewHosts := computeDurationBasedNumNewHosts(
					scheduledTasksDuration, runningTasksDuration,
					numExistingHosts, maxDurationPerHost)
				So(numNewHosts, ShouldEqual, 0)
			})

		Convey("when there's an insufficient number of hosts, new hosts are "+
			"needed", func() {
			scheduledTasksDuration := 120.
			runningTasksDuration := 120.
			numExistingHosts := 10.
			maxDurationPerHost := time.Duration(20) * time.Second
			numNewHosts := computeDurationBasedNumNewHosts(
				scheduledTasksDuration, runningTasksDuration,
				numExistingHosts, maxDurationPerHost)
			So(numNewHosts, ShouldEqual, 2)
		})

		Convey("when the duration of existing tasks is short, no new hosts "+
			"are needed", func() {
			scheduledTasksDuration := 12.
			runningTasksDuration := 10.
			numExistingHosts := 10.
			maxDurationPerHost := time.Duration(20) * time.Second
			numNewHosts := computeDurationBasedNumNewHosts(
				scheduledTasksDuration, runningTasksDuration,
				numExistingHosts, maxDurationPerHost)
			So(numNewHosts, ShouldEqual, 0)
		})

		Convey("when the duration of existing tasks is less than the "+
			"maximum duration, exactly one host is needed", func() {
			scheduledTasksDuration := 12.
			runningTasksDuration := 10.
			numExistingHosts := 0.
			maxDurationPerHost := time.Duration(23) * time.Second
			numNewHosts := computeDurationBasedNumNewHosts(
				scheduledTasksDuration, runningTasksDuration,
				numExistingHosts, maxDurationPerHost)
			So(numNewHosts, ShouldEqual, 1)
		})

		Convey("when the duration of existing tasks is equal to the "+
			"maximum duration, exactly one host is needed", func() {
			scheduledTasksDuration := 12.
			runningTasksDuration := 12.
			numExistingHosts := 0.
			maxDurationPerHost := time.Duration(24) * time.Second
			numNewHosts := computeDurationBasedNumNewHosts(
				scheduledTasksDuration, runningTasksDuration,
				numExistingHosts, maxDurationPerHost)
			So(numNewHosts, ShouldEqual, 1)
		})

		Convey("when the duration of existing tasks is slightly more than "+
			"the maximum duration, exactly two hosts are needed", func() {
			scheduledTasksDuration := 12.
			runningTasksDuration := 13.
			numExistingHosts := 0.
			maxDurationPerHost := time.Duration(24) * time.Second
			numNewHosts := computeDurationBasedNumNewHosts(
				scheduledTasksDuration, runningTasksDuration,
				numExistingHosts, maxDurationPerHost)
			So(numNewHosts, ShouldEqual, 2)
		})
	})
}

func TestComputeRunningTasksDuration(t *testing.T) {
	var testTaskDuration time.Duration
	var hostIds []string
	var runningTaskIds []string
	var taskDurations model.ProjectTaskDurations

	Convey("When calling computeRunningTasksDuration...", t, func() {
		// set all variables
		testTaskDuration = time.Duration(4) * time.Minute
		hostIds = []string{"h1", "h2", "h3", "h4", "h5", "h6"}
		runningTaskIds = []string{"t1", "t2", "t3", "t4", "t5", "t6"}

		startTimeOne := time.Now()
		startTimeTwo := startTimeOne.Add(-time.Duration(1) * time.Minute)
		startTimeThree := startTimeOne.Add(-time.Duration(2) * time.Minute)

		remainingDurationOne := (time.Duration(4) * time.Minute).Seconds()
		remainingDurationTwo := (time.Duration(3) * time.Minute).Seconds()
		remainingDurationThree := (time.Duration(2) * time.Minute).Seconds()
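		// each remaining duration above is the expected task duration (4
		// minutes) minus the time elapsed since the corresponding start time
		// (0, 1, and 2 minutes respectively)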

		// durations of tasks we know
		taskDurations = model.ProjectTaskDurations{
			TaskDurationByProject: map[string]*model.BuildVariantTaskDurations{
				"": {
					TaskDurationByBuildVariant: map[string]*model.TaskDurations{
						"": {
							TaskDurationByDisplayName: map[string]time.Duration{
								"": testTaskDuration,
							},
						},
					},
				},
			},
		}

		So(db.Clear(task.Collection), ShouldBeNil)

		Convey("the total duration of running tasks with similar start times "+
			"should be the total of the remaining time using estimates from "+
			"the project task duration data for running tasks", func() {
			// tasks running on hosts
			runningTasks := []task.Task{
				{Id: runningTaskIds[0], StartTime: startTimeOne},
				{Id: runningTaskIds[1], StartTime: startTimeOne},
				{Id: runningTaskIds[2], StartTime: startTimeOne},
			}

			for _, runningTask := range runningTasks {
				So(runningTask.Insert(), ShouldBeNil)
			}

			// running tasks have a time to completion of about 1 minute
			existingDistroHosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}

			runningTasksDuration, err :=
				computeRunningTasksDuration(existingDistroHosts, taskDurations)

			So(err, ShouldBeNil)

			// the running task duration should be a total of the remaining
			// duration of running tasks - 3 in this case
			// due to scheduling variables, we allow a 10 second tolerance
			So(runningTasksDuration, ShouldAlmostEqual, remainingDurationOne*3, 10)
		})

		Convey("the total duration of running tasks with different start "+
			"times should be the total of the remaining time using estimates "+
			"from the project task duration data for running tasks", func() {

			// running tasks have a time to completion of about 1 minute
			existingDistroHosts := []host.Host{
				{Id: hostIds[0], RunningTask: runningTaskIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[1]},
				{Id: hostIds[2], RunningTask: runningTaskIds[2]},
				{Id: hostIds[3], RunningTask: runningTaskIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[4]},
				{Id: hostIds[5], RunningTask: runningTaskIds[5]},
			}

			// tasks running on hosts
			runningTasks := []task.Task{
				{Id: runningTaskIds[0], StartTime: startTimeThree},
				{Id: runningTaskIds[1], StartTime: startTimeTwo},
				{Id: runningTaskIds[2], StartTime: startTimeOne},
				{Id: runningTaskIds[3], StartTime: startTimeTwo},
				{Id: runningTaskIds[4], StartTime: startTimeOne},
				{Id: runningTaskIds[5], StartTime: startTimeThree},
			}

			for _, runningTask := range runningTasks {
				So(runningTask.Insert(), ShouldBeNil)
			}

			runningTasksDuration, err :=
				computeRunningTasksDuration(existingDistroHosts, taskDurations)
			So(err, ShouldBeNil)
			// the running task duration should be a total of the remaining
			// duration of running tasks - 6 in this case
			// due to scheduling variables, we allow a 5 second tolerance
			expectedResult := remainingDurationOne*2 + remainingDurationTwo*2 +
				remainingDurationThree*2
			So(runningTasksDuration, ShouldAlmostEqual, expectedResult, 5)
		})

		Convey("the duration of running tasks with unknown running time "+
			"estimates should be accounted for", func() {

			// running tasks have a time to completion of about 1 minute
			existingDistroHosts := []host.Host{
				{Id: hostIds[0], RunningTask: runningTaskIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[1]},
				{Id: hostIds[2], RunningTask: runningTaskIds[2]},
			}

			// tasks running on hosts
			runningTasks := []task.Task{
				{Id: runningTaskIds[0], StartTime: startTimeThree, DisplayName: "unknown"},
				{Id: runningTaskIds[1], StartTime: startTimeTwo},
				{Id: runningTaskIds[2], StartTime: startTimeOne, DisplayName: "unknown"},
			}

			for _, runningTask := range runningTasks {
				So(runningTask.Insert(), ShouldBeNil)
			}

			runningTasksDuration, err :=
				computeRunningTasksDuration(existingDistroHosts, taskDurations)
			So(err, ShouldBeNil)
			// only task 1's duration is known, so the others should use the default.
			expectedDur := remainingDurationTwo + float64((2*model.DefaultTaskDuration)/time.Second)
			So(runningTasksDuration, ShouldAlmostEqual, expectedDur, 200)
		})

		Convey("the duration of running tasks with outliers as running times "+
			"should be ignored", func() {

			// running tasks have a time to completion of about 1 minute
			existingDistroHosts := []host.Host{
				{Id: hostIds[0], RunningTask: runningTaskIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[1]},
				{Id: hostIds[2], RunningTask: runningTaskIds[2]},
			}

			// tasks running on hosts
			runningTasks := []task.Task{
				{Id: runningTaskIds[0], StartTime: startTimeOne},
				{Id: runningTaskIds[1], StartTime: startTimeOne.Add(-time.Duration(4) * time.Hour)},
				{Id: runningTaskIds[2], StartTime: startTimeTwo},
			}

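			// runningTaskIds[1] started four hours before the others; with an
			// expected duration of only four minutes its remaining time would
			// be deeply negative, so it should be discarded as an outlier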
			for _, runningTask := range runningTasks {
				So(runningTask.Insert(), ShouldBeNil)
			}

			runningTasksDuration, err :=
				computeRunningTasksDuration(existingDistroHosts, taskDurations)
			So(err, ShouldBeNil)
			// the outlier task's (runningTaskIds[1]) duration should be ignored
			// due to scheduling variables, we allow a 5 second tolerance
			expectedResult := remainingDurationOne + remainingDurationTwo
			So(runningTasksDuration, ShouldAlmostEqual, expectedResult, 5)
		})

		Convey("the total duration if there are no running tasks should be "+
			"zero", func() {

			// no tasks are running on any of these hosts
			existingDistroHosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
				{Id: hostIds[2]},
				{Id: hostIds[3]},
			}

			runningTasksDuration, err :=
				computeRunningTasksDuration(existingDistroHosts, taskDurations)
			So(err, ShouldBeNil)
			// the running task duration should be a total of the remaining
			// duration of running tasks
			So(runningTasksDuration, ShouldEqual, 0)
		})
	})
}

func TestComputeScheduledTasksDuration(t *testing.T) {
	var expDur time.Duration
	var tasks []string
	var queueItems []model.TaskQueueItem
	var tasksAccountedFor map[string]bool

	Convey("When calling computeScheduledTasksDuration...", t, func() {
		Convey("the total scheduled tasks duration should equal the duration "+
			"of all tasks scheduled, for that distro, in the queue", func() {
			tasks = []string{"t1", "t2", "t3", "t4", "t5", "t6"}
			expDur = time.Duration(180) * time.Minute
			tasksAccountedFor = make(map[string]bool)
			queueItems = []model.TaskQueueItem{
				{Id: tasks[0], ExpectedDuration: expDur},
				{Id: tasks[1], ExpectedDuration: expDur},
				{Id: tasks[2], ExpectedDuration: expDur},
				{Id: tasks[3], ExpectedDuration: expDur},
				{Id: tasks[4], ExpectedDuration: expDur},
			}
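			// five tasks at 180 minutes each should come to 5 * 180 * 60 =
			// 54000 seconds, which is what the assertion below expects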

			// construct the data needed by computeScheduledTasksDuration
			scheduledDistroTasksData := &ScheduledDistroTasksData{
				taskQueueItems:    queueItems,
				tasksAccountedFor: tasksAccountedFor,
			}

			scheduledTasksDuration, _ := computeScheduledTasksDuration(
				scheduledDistroTasksData)

			expectedTotalDuration := float64(len(queueItems)) * expDur.Seconds()
			So(scheduledTasksDuration, ShouldEqual, expectedTotalDuration)
		})

		Convey("the map of tasks accounted for should be updated", func() {
			tasks = []string{"t1", "t2", "t3", "t4", "t5", "t6"}
			expDur = time.Duration(180) * time.Minute
			tasksAccountedFor = make(map[string]bool)
			queueItems = []model.TaskQueueItem{
				{Id: tasks[0], ExpectedDuration: expDur},
				{Id: tasks[1], ExpectedDuration: expDur},
				{Id: tasks[2], ExpectedDuration: expDur},
				{Id: tasks[3], ExpectedDuration: expDur},
				{Id: tasks[4], ExpectedDuration: expDur},
			}

			// construct the data needed by computeScheduledTasksDuration
			scheduledDistroTasksData := &ScheduledDistroTasksData{
				taskQueueItems:    queueItems,
				tasksAccountedFor: tasksAccountedFor,
			}

			computeScheduledTasksDuration(scheduledDistroTasksData)

			expectedTasksAccountedFor := map[string]bool{
				tasks[0]: true,
				tasks[1]: true,
				tasks[2]: true,
				tasks[3]: true,
				tasks[4]: true,
			}

			So(tasksAccountedFor, ShouldResemble, expectedTasksAccountedFor)
		})

		Convey("a task that already appears in another distro's task queue "+
			"should be disregarded as a duplicate entry", func() {
			tasks = []string{"t1", "t2", "t3", "t4", "t5", "t6"}
			expDur = time.Duration(180) * time.Minute
			tasksAccountedFor = make(map[string]bool)
			distroOneQueueItems := []model.TaskQueueItem{
				{Id: tasks[0], ExpectedDuration: expDur},
				{Id: tasks[1], ExpectedDuration: expDur},
				{Id: tasks[2], ExpectedDuration: expDur},
				{Id: tasks[3], ExpectedDuration: expDur},
				{Id: tasks[4], ExpectedDuration: expDur},
			}

			// construct the data needed by computeScheduledTasksDuration
			scheduledDistroTasksData := &ScheduledDistroTasksData{
				taskQueueItems:    distroOneQueueItems,
				tasksAccountedFor: tasksAccountedFor,
			}

			computeScheduledTasksDuration(scheduledDistroTasksData)

			expectedTasksAccountedFor := map[string]bool{
				tasks[0]: true,
				tasks[1]: true,
				tasks[2]: true,
				tasks[3]: true,
				tasks[4]: true,
			}

			So(tasksAccountedFor, ShouldResemble, expectedTasksAccountedFor)

			// task 0 appears in both task queues, so its duration should be
			// ignored. task 5 is new, so its duration should be used and the
			// map should be updated to include it
			distroTwoQueueItems := []model.TaskQueueItem{
				{Id: tasks[0], ExpectedDuration: expDur},
				{Id: tasks[5], ExpectedDuration: expDur},
			}
			expectedTasksAccountedFor[tasks[5]] = true

			// construct the data needed by computeScheduledTasksDuration
			scheduledDistroTasksData = &ScheduledDistroTasksData{
				taskQueueItems:    distroTwoQueueItems,
				tasksAccountedFor: tasksAccountedFor,
			}

			scheduledTasksDuration, _ := computeScheduledTasksDuration(
				scheduledDistroTasksData)

			So(tasksAccountedFor, ShouldResemble, expectedTasksAccountedFor)
			So(scheduledTasksDuration, ShouldEqual, expDur.Seconds())
		})
	})
}

func TestDurationBasedHostAllocator(t *testing.T) {
	var taskIds []string
	var runningTaskIds []string
	var hostIds []string
	var dist distro.Distro
	var testTaskDuration time.Duration
	var taskDurations model.ProjectTaskDurations
	var durationBasedHostAllocator *DurationBasedHostAllocator

	Convey("With a duration based host allocator,"+
		" determining the number of new hosts to spin up", t, func() {

		durationBasedHostAllocator = &DurationBasedHostAllocator{}
		taskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		runningTaskIds = []string{"t1", "t2", "t3", "t4", "t5"}
		hostIds = []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9"}
		dist = distro.Distro{Provider: "ec2"}
		testTaskDuration = time.Duration(2) * time.Minute
		taskDurations = model.ProjectTaskDurations{
			TaskDurationByProject: map[string]*model.BuildVariantTaskDurations{
				"": {
					TaskDurationByBuildVariant: map[string]*model.TaskDurations{
						"": {
							TaskDurationByDisplayName: map[string]time.Duration{
								"": testTaskDuration,
							},
						},
					},
				},
			},
		}
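		// the tasks inserted in the cases below have empty project, build
		// variant, and display name fields, so duration lookups should land on
		// the empty-string keys above and resolve to testTaskDuration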

		So(db.Clear(task.Collection), ShouldBeNil)

		Convey("if there are no tasks to run, no new hosts should be needed",
			func() {
				hosts := []host.Host{
					{Id: hostIds[0]},
					{Id: hostIds[1]},
					{Id: hostIds[2]},
				}
				dist.PoolSize = len(hosts) + 5

				hostAllocatorData := &HostAllocatorData{
					existingDistroHosts: map[string][]host.Host{
						"": hosts,
					},
					distros: map[string]distro.Distro{
						"": dist,
					},
				}

				tasksAccountedFor := make(map[string]bool)
				distroScheduleData := make(map[string]DistroScheduleData)

				newHosts, err := durationBasedHostAllocator.
					numNewHostsForDistro(hostAllocatorData, dist,
						tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
				So(err, ShouldBeNil)
				So(newHosts, ShouldEqual, 0)
			})

		Convey("if the number of existing hosts equals the max hosts, no new"+
			" hosts can be spawned", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
				{Id: taskIds[3]},
			}
			dist.PoolSize = 0

			hostAllocatorData := &HostAllocatorData{
				existingDistroHosts: map[string][]host.Host{},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
			hosts := []host.Host{
				{Id: hostIds[0]},
			}
			dist.PoolSize = len(hosts)

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of existing hosts exceeds the max hosts, no new"+
			" hosts can be spawned", func() {

			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
				{Id: taskIds[2]},
				{Id: taskIds[3]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
			}
			dist.PoolSize = 1

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run is less than the number of free"+
			" hosts, no new hosts are needed", func() {
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
			}
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
				{Id: hostIds[2]},
			}
			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)

		})

		Convey("if the number of tasks to run is equal to the number of free"+
			" hosts, no new hosts are needed", func() {
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
			}
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0]},
				{Id: taskIds[1]},
			}

			dist.PoolSize = len(hosts) + 5

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the number of tasks to run exceeds the number of free"+
			" hosts, new hosts are needed up to the maximum allowed for the"+
			" dist", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 9

			// In this test:
			//
			// 1. Total distro duration is:
			//		(len(taskQueueItems) * expDur ) +
			//		time left on hosts with running tasks
			// which comes out to:
			//		(5 * 200 * 60) + (60 * 3) ~ 60180 (in seconds)
			//
			// 2. MAX_DURATION_PER_DISTRO = 7200 (2 hours)
			//
			// 3. We have 5 existing hosts
			//
			// Thus, our duration based host allocator will always return 8 -
			// which is greater than what distro.PoolSize-len(existingDistroHosts)
			// will ever return in this situation.
			//
			// Hence, we should always expect to use that minimum.
			//
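			// (The expectations below also appear to be capped by the number
			// of queued tasks that lack a free host: 5 queued tasks minus 2
			// free hosts leaves 3, which is why a pool size of 9 still yields
			// only 3 new hosts rather than 4.)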
			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// the total running-task duration here is roughly 60 * 3 = 180
			// seconds (see the breakdown above)
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)

			dist.PoolSize = 8
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
			dist.PoolSize = 7
			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor = make(map[string]bool)
			distroScheduleData = make(map[string]DistroScheduleData)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)

			dist.PoolSize = 6

			hostAllocatorData = &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}
			tasksAccountedFor = make(map[string]bool)

			newHosts, err = durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 1)
		})

		Convey("if the distro cannot be used to spawn hosts, then no new "+
			"hosts can be spawned", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}
			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1]},
				{Id: hostIds[2]},
				{Id: hostIds[3]},
				{Id: hostIds[4]},
			}

			dist.PoolSize = 20
			dist.Provider = "static"

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 0)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, the estimate of new "+
			"\nhosts should be used", func() {
			expDur := time.Duration(200) * time.Minute
			// all runnable tasks have an expected duration of expDur (200mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than the maximum "+
			"\nnumber of new hosts allowed for this distro, but greater than "+
			"\nthe difference between the number of runnable tasks and the "+
			"\nnumber of free hosts, that difference should be used", func() {
			expDur := time.Duration(400) * time.Minute
			// all runnable tasks have an expected duration of expDur (400mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 11
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 3)
		})

		Convey("if the duration based estimate is less than both the maximum "+
			"\nnumber of new hosts allowed for this distro, and the "+
			"\ndifference between the number of runnable tasks and the "+
			"\nnumber of free hosts, then the duration based estimate should "+
			"be used", func() {
			expDur := time.Duration(180) * time.Minute
			// all runnable tasks have an expected duration of expDur (180mins)
			taskQueueItems := []model.TaskQueueItem{
				{Id: taskIds[0], ExpectedDuration: expDur},
				{Id: taskIds[1], ExpectedDuration: expDur},
				{Id: taskIds[2], ExpectedDuration: expDur},
				{Id: taskIds[3], ExpectedDuration: expDur},
				{Id: taskIds[4], ExpectedDuration: expDur},
			}

			// running tasks have a time to completion of about 1 minute
			hosts := []host.Host{
				{Id: hostIds[0]},
				{Id: hostIds[1], RunningTask: runningTaskIds[0]},
				{Id: hostIds[2], RunningTask: runningTaskIds[1]},
				{Id: hostIds[3]},
				{Id: hostIds[4], RunningTask: runningTaskIds[2]},
				{Id: hostIds[5]},
			}
			dist.PoolSize = 20

			hostAllocatorData := &HostAllocatorData{
				taskQueueItems: map[string][]model.TaskQueueItem{
					"": taskQueueItems,
				},
				existingDistroHosts: map[string][]host.Host{
					"": hosts,
				},
				distros: map[string]distro.Distro{
					"": dist,
				},
				projectTaskDurations: taskDurations,
			}

			tasksAccountedFor := make(map[string]bool)
			distroScheduleData := make(map[string]DistroScheduleData)

			// tasks running on hosts
			for _, runningTaskId := range runningTaskIds {
				task := task.Task{Id: runningTaskId}
				So(task.Insert(), ShouldBeNil)
			}

			// estimates based on data
			// duration estimate: 2
			// max new hosts allowed: 15
			// 'one-host-per-scheduled-task': 3
			newHosts, err := durationBasedHostAllocator.
				numNewHostsForDistro(hostAllocatorData, dist,
					tasksAccountedFor, distroScheduleData, hostAllocatorTestConf)
			So(err, ShouldBeNil)
			So(newHosts, ShouldEqual, 2)
		})
	})
}