github.com/juju/juju@v0.0.0-20240430160146-1752b71fcf00/worker/provisioner/provisioner_task_test.go

     1  // Copyright 2018 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package provisioner_test
     5  
     6  import (
     7  	stdcontext "context"
     8  	"fmt"
     9  	"reflect"
    10  	"strings"
    11  	"sync"
    12  	"time"
    13  
    14  	"github.com/juju/clock"
    15  	"github.com/juju/collections/set"
    16  	"github.com/juju/collections/transform"
    17  	"github.com/juju/errors"
    18  	"github.com/juju/loggo"
    19  	"github.com/juju/names/v5"
    20  	"github.com/juju/retry"
    21  	"github.com/juju/testing"
    22  	jc "github.com/juju/testing/checkers"
    23  	"github.com/juju/version/v2"
    24  	"github.com/juju/worker/v3/workertest"
    25  	"go.uber.org/mock/gomock"
    26  	gc "gopkg.in/check.v1"
    27  
    28  	"github.com/juju/juju/api"
    29  	apiprovisioner "github.com/juju/juju/api/agent/provisioner"
    30  	"github.com/juju/juju/controller/authentication"
    31  	corebase "github.com/juju/juju/core/base"
    32  	"github.com/juju/juju/core/constraints"
    33  	"github.com/juju/juju/core/instance"
    34  	"github.com/juju/juju/core/life"
    35  	"github.com/juju/juju/core/network"
    36  	"github.com/juju/juju/core/status"
    37  	"github.com/juju/juju/core/watcher"
    38  	"github.com/juju/juju/core/watcher/watchertest"
    39  	"github.com/juju/juju/environs"
    40  	"github.com/juju/juju/environs/config"
    41  	"github.com/juju/juju/environs/context"
    42  	"github.com/juju/juju/environs/imagemetadata"
    43  	"github.com/juju/juju/environs/instances"
    44  	providermocks "github.com/juju/juju/provider/common/mocks"
    45  	"github.com/juju/juju/rpc/params"
    46  	coretesting "github.com/juju/juju/testing"
    47  	jujuversion "github.com/juju/juju/version"
    48  	"github.com/juju/juju/worker/provisioner"
    49  	"github.com/juju/juju/worker/provisioner/mocks"
    50  )
    51  
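         // numProvisionWorkersForTesting is the size of the provisioning worker
         // pool used by the tests in this file.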
    52  const numProvisionWorkersForTesting = 4
    53  
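         // ProvisionerTaskSuite exercises the provisioner task in isolation, using
         // mock watchers, a mock task API and a stub instance broker in place of
         // real dependencies.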
    54  type ProvisionerTaskSuite struct {
    55  	testing.IsolationSuite
    56  
    57  	setupDone            chan bool
    58  	modelMachinesChanges chan []string
    59  	modelMachinesWatcher watcher.StringsWatcher
    60  
    61  	machineErrorRetryChanges chan struct{}
    62  	machineErrorRetryWatcher watcher.NotifyWatcher
    63  
    64  	taskAPI *mocks.MockTaskAPI
    65  
    66  	instances      []instances.Instance
    67  	instanceBroker *testInstanceBroker
    68  
    69  	callCtx           *context.CloudCallContext
    70  	invalidCredential bool
    71  
    72  	auth *testAuthenticationProvider
    73  }
    74  
    75  var _ = gc.Suite(&ProvisionerTaskSuite{})
    76  
    77  func (s *ProvisionerTaskSuite) SetUpTest(c *gc.C) {
    78  	s.IsolationSuite.SetUpTest(c)
    79  
    80  	s.setupDone = make(chan bool)
    81  	s.modelMachinesChanges = make(chan []string)
    82  	s.modelMachinesWatcher = watchertest.NewMockStringsWatcher(s.modelMachinesChanges)
    83  
    84  	s.machineErrorRetryChanges = make(chan struct{})
    85  	s.machineErrorRetryWatcher = watchertest.NewMockNotifyWatcher(s.machineErrorRetryChanges)
    86  
    87  	s.instances = []instances.Instance{}
    88  	s.instanceBroker = &testInstanceBroker{
    89  		Stub:      &testing.Stub{},
    90  		callsChan: make(chan string, 2),
    91  		allInstancesFunc: func(ctx context.ProviderCallContext) ([]instances.Instance, error) {
    92  			return s.instances, s.instanceBroker.NextErr()
    93  		},
    94  	}
    95  
    96  	s.callCtx = &context.CloudCallContext{
    97  		Context: stdcontext.TODO(),
    98  		InvalidateCredentialFunc: func(string) error {
    99  			s.invalidCredential = true
   100  			return nil
   101  		},
   102  	}
   103  	s.auth = &testAuthenticationProvider{&testing.Stub{}}
   104  }
   105  
   106  func (s *ProvisionerTaskSuite) TestStartStop(c *gc.C) {
   107  	// We expect no calls to the task API.
   108  	defer s.setUpMocks(c).Finish()
   109  
   110  	task := s.newProvisionerTask(c,
   111  		config.HarvestAll,
   112  		&mockDistributionGroupFinder{},
   113  		mockToolsFinder{},
   114  		numProvisionWorkersForTesting,
   115  	)
   116  	workertest.CheckAlive(c, task)
   117  	workertest.CleanKill(c, task)
   118  
   119  	err := workertest.CheckKilled(c, task)
   120  	c.Assert(err, jc.ErrorIsNil)
   121  	err = workertest.CheckKilled(c, s.modelMachinesWatcher)
   122  	c.Assert(err, jc.ErrorIsNil)
   123  	err = workertest.CheckKilled(c, s.machineErrorRetryWatcher)
   124  	c.Assert(err, jc.ErrorIsNil)
   125  	s.instanceBroker.CheckNoCalls(c)
   126  }
   127  
   128  func (s *ProvisionerTaskSuite) TestStopInstancesIgnoresMachinesWithKeep(c *gc.C) {
   129  	defer s.setUpMocks(c).Finish()
   130  
   131  	i0 := &testInstance{id: "zero"}
   132  	i1 := &testInstance{id: "one"}
   133  	s.instances = []instances.Instance{
   134  		i0,
   135  		i1,
   136  	}
   137  
   138  	m0 := &testMachine{
   139  		id:       "0",
   140  		life:     life.Dead,
   141  		instance: i0,
   142  	}
   143  	m1 := &testMachine{
   144  		id:           "1",
   145  		life:         life.Dead,
   146  		instance:     i1,
   147  		keepInstance: true,
   148  	}
   149  
   150  	s.expectMachines(m0, m1)
   151  
   152  	task := s.newProvisionerTask(c,
   153  		config.HarvestAll,
   154  		&mockDistributionGroupFinder{},
   155  		mockToolsFinder{},
   156  		numProvisionWorkersForTesting,
   157  	)
   158  	defer workertest.CleanKill(c, task)
   159  
   160  	c.Assert(m0.markForRemoval, jc.IsFalse)
   161  	c.Assert(m1.markForRemoval, jc.IsFalse)
   162  
   163  	s.sendModelMachinesChange(c, "0", "1")
   164  
   165  	s.waitForTask(c, []string{"AllRunningInstances", "StopInstances"})
   166  
   167  	workertest.CleanKill(c, task)
   168  	close(s.instanceBroker.callsChan)
   169  	s.instanceBroker.CheckCalls(c, []testing.StubCall{
   170  		{"AllRunningInstances", []interface{}{s.callCtx}},
   171  		{"StopInstances", []interface{}{s.callCtx, []instance.Id{"zero"}}},
   172  	})
   173  	c.Assert(m0.markForRemoval, jc.IsTrue)
   174  	c.Assert(m1.markForRemoval, jc.IsTrue)
   175  }
   176  
   177  func (s *ProvisionerTaskSuite) TestProvisionerRetries(c *gc.C) {
   178  	defer s.setUpMocks(c).Finish()
   179  
   180  	m0 := &testMachine{id: "0"}
   181  	s.taskAPI.EXPECT().MachinesWithTransientErrors().Return(
   182  		[]apiprovisioner.MachineStatusResult{{Machine: m0, Status: params.StatusResult{}}}, nil)
   183  	s.expectProvisioningInfo(m0)
   184  
   185  	s.instanceBroker.SetErrors(
   186  		errors.New("errors 1"),
   187  		errors.New("errors 2"),
   188  	)
   189  
   190  	task := s.newProvisionerTaskWithRetry(c,
   191  		config.HarvestAll,
   192  		&mockDistributionGroupFinder{},
   193  		mockToolsFinder{},
   194  		provisioner.NewRetryStrategy(0*time.Second, 1),
   195  		numProvisionWorkersForTesting,
   196  	)
   197  
   198  	s.sendMachineErrorRetryChange(c)
   199  
   200  	s.waitForTask(c, []string{"StartInstance", "StartInstance"})
   201  
   202  	workertest.CleanKill(c, task)
   203  	close(s.instanceBroker.callsChan)
   204  	s.auth.CheckCallNames(c, "SetupAuthentication")
   205  	s.instanceBroker.CheckCallNames(c, "StartInstance", "StartInstance")
   206  }
   207  
   208  func (s *ProvisionerTaskSuite) TestEvenZonePlacement(c *gc.C) {
   209  	ctrl := s.setUpMocks(c)
   210  	defer ctrl.Finish()
   211  
    212  	// There are 3 available zones, so test with 4 machines
    213  	// to ensure an even spread across the zones.
   214  	machines := []*testMachine{{
   215  		id: "0",
   216  	}, {
   217  		id: "1",
   218  	}, {
   219  		id: "2",
   220  	}, {
   221  		id: "3",
   222  	}}
   223  	broker := s.setUpZonedEnviron(ctrl, machines...)
   224  	azConstraints := newAZConstraintStartInstanceParamsMatcher()
   225  	broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).Times(len(machines))
   226  
    227  	// Access to usedZones must be synchronised between the test logic and
    228  	// the worker goroutines that execute the expectations below.
   229  	zoneLock := sync.Mutex{}
   230  	var usedZones []string
   231  
   232  	for _, m := range machines {
   233  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(&environs.StartInstanceResult{
   234  			Instance: &testInstance{id: "instance-" + m.id},
   235  		}, nil).Do(func(ctx, params interface{}) {
   236  			zoneLock.Lock()
   237  			usedZones = append(usedZones, params.(environs.StartInstanceParams).AvailabilityZone)
   238  			zoneLock.Unlock()
   239  		})
   240  	}
   241  
   242  	task := s.newProvisionerTaskWithBroker(c, broker, nil, numProvisionWorkersForTesting)
   243  	s.sendModelMachinesChange(c, "0", "1", "2", "3")
   244  
   245  	retryCallArgs := retry.CallArgs{
   246  		Clock:       clock.WallClock,
   247  		MaxDuration: coretesting.LongWait,
   248  		Delay:       10 * time.Millisecond,
    249  		Func: func() error {
    250  			zoneLock.Lock()
    251  			defer zoneLock.Unlock()
    252  			if len(usedZones) == 4 {
    253  				return nil
    254  			}
    255  			return errors.Errorf("not ready yet")
    256  		},
   257  	}
   258  	err := retry.Call(retryCallArgs)
   259  	c.Assert(err, jc.ErrorIsNil)
   260  
   261  	zoneCounts := make(map[string]int)
   262  	for _, z := range usedZones {
   263  		count := zoneCounts[z] + 1
   264  		zoneCounts[z] = count
   265  	}
   266  	for z, count := range zoneCounts {
   267  		if count == 0 || count > 2 {
   268  			c.Fatalf("expected either 1 or 2 machines for %v, got %d", z, count)
   269  		}
   270  	}
   271  	c.Assert(set.NewStrings(usedZones...).SortedValues(), jc.DeepEquals, []string{"az1", "az2", "az3"})
   272  
   273  	workertest.CleanKill(c, task)
   274  }
   275  
   276  func (s *ProvisionerTaskSuite) TestMultipleSpaceConstraints(c *gc.C) {
   277  	ctrl := s.setUpMocks(c)
   278  	defer ctrl.Finish()
   279  
   280  	m0 := &testMachine{
   281  		id:          "0",
   282  		constraints: "spaces=alpha,beta",
   283  		topology: params.ProvisioningNetworkTopology{
   284  			SubnetAZs: map[string][]string{
   285  				"subnet-1": {"az-1"},
   286  				"subnet-2": {"az-2"},
   287  			},
   288  			SpaceSubnets: map[string][]string{
   289  				"alpha": {"subnet-1"},
   290  				"beta":  {"subnet-2"},
   291  			},
   292  		},
   293  	}
   294  	broker := s.setUpZonedEnviron(ctrl, m0)
   295  	spaceConstraints := newSpaceConstraintStartInstanceParamsMatcher("alpha", "beta")
   296  
   297  	spaceConstraints.addMatch("subnets-to-zones", func(p environs.StartInstanceParams) bool {
   298  		if len(p.SubnetsToZones) != 2 {
   299  			return false
   300  		}
   301  
   302  		// Order independence.
   303  		for _, subZones := range p.SubnetsToZones {
   304  			for sub, zones := range subZones {
   305  				var zone string
   306  
   307  				switch sub {
   308  				case "subnet-1":
   309  					zone = "az-1"
   310  				case "subnet-2":
   311  					zone = "az-2"
   312  				default:
   313  					return false
   314  				}
   315  
   316  				if len(zones) != 1 || zones[0] != zone {
   317  					return false
   318  				}
   319  			}
   320  		}
   321  
   322  		return true
   323  	})
   324  
   325  	broker.EXPECT().DeriveAvailabilityZones(s.callCtx, spaceConstraints).Return([]string{}, nil)
   326  
   327  	// Use satisfaction of this call as the synchronisation point.
   328  	started := make(chan struct{})
   329  	broker.EXPECT().StartInstance(s.callCtx, spaceConstraints).Return(&environs.StartInstanceResult{
   330  		Instance: &testInstance{id: "instance-0"},
   331  	}, nil).Do(func(context.ProviderCallContext, environs.StartInstanceParams) {
   332  		go func() { started <- struct{}{} }()
   333  	})
   334  	task := s.newProvisionerTaskWithBroker(c, broker, nil, numProvisionWorkersForTesting)
   335  
   336  	s.sendModelMachinesChange(c, "0")
   337  
   338  	select {
   339  	case <-started:
   340  	case <-time.After(coretesting.LongWait):
   341  		c.Fatalf("no matching call to StartInstance")
   342  	}
   343  
   344  	workertest.CleanKill(c, task)
   345  }
   346  
   347  func (s *ProvisionerTaskSuite) TestZoneConstraintsNoZoneAvailable(c *gc.C) {
   348  	ctrl := s.setUpMocks(c)
   349  	defer ctrl.Finish()
   350  
   351  	m0 := &testMachine{
   352  		id:          "0",
   353  		constraints: "zones=az9",
   354  	}
   355  	broker := s.setUpZonedEnviron(ctrl, m0)
   356  
    357  	// The constraint for availability zone az9 cannot be satisfied;
    358  	// this broker only knows of az1, az2 and az3.
   359  	azConstraints := newAZConstraintStartInstanceParamsMatcher("az9")
   360  	broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil)
   361  
   362  	task := s.newProvisionerTaskWithBroker(c, broker, nil, numProvisionWorkersForTesting)
   363  	s.sendModelMachinesChange(c, "0")
   364  	s.waitForWorkerSetup(c)
   365  
   366  	// Wait for instance status to be set.
   367  	timeout := time.After(coretesting.LongWait)
   368  	for msg := ""; msg == ""; {
   369  		select {
   370  		case <-time.After(coretesting.ShortWait):
   371  			_, msg, _ = m0.InstanceStatus()
   372  		case <-timeout:
   373  			c.Fatalf("machine InstanceStatus was not set")
   374  		}
   375  	}
   376  
   377  	_, msg, err := m0.InstanceStatus()
   378  	c.Assert(err, jc.ErrorIsNil)
   379  	c.Check(msg, gc.Equals, "suitable availability zone for machine 0 not found")
   380  
   381  	workertest.CleanKill(c, task)
   382  }
   383  
   384  func (s *ProvisionerTaskSuite) TestZoneConstraintsNoDistributionGroup(c *gc.C) {
   385  	ctrl := s.setUpMocks(c)
   386  	defer ctrl.Finish()
   387  
   388  	m0 := &testMachine{
   389  		id:          "0",
   390  		constraints: "zones=az1",
   391  	}
   392  	broker := s.setUpZonedEnviron(ctrl, m0)
   393  	azConstraints := newAZConstraintStartInstanceParamsMatcher("az1")
   394  	broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil)
   395  
    396  	// For the call to StartInstance, we expect the same zone constraint to
    397  	// be present, and we also expect the zone in the start-instance params
    398  	// to match the constraint, because that zone is available in this environ.
   399  	azConstraintsAndDerivedZone := newAZConstraintStartInstanceParamsMatcher("az1")
   400  	azConstraintsAndDerivedZone.addMatch("availability zone: az1", func(p environs.StartInstanceParams) bool {
   401  		return p.AvailabilityZone == "az1"
   402  	})
   403  
   404  	// Use satisfaction of this call as the synchronisation point.
   405  	started := make(chan struct{})
   406  	broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(&environs.StartInstanceResult{
   407  		Instance: &testInstance{id: "instance-0"},
   408  	}, nil).Do(func(context.ProviderCallContext, environs.StartInstanceParams) {
   409  		go func() { started <- struct{}{} }()
   410  	})
   411  
   412  	task := s.newProvisionerTaskWithBroker(c, broker, nil, numProvisionWorkersForTesting)
   413  
   414  	s.sendModelMachinesChange(c, "0")
   415  
   416  	select {
   417  	case <-started:
   418  	case <-time.After(coretesting.LongWait):
   419  		c.Fatalf("no matching call to StartInstance")
   420  	}
   421  
   422  	workertest.CleanKill(c, task)
   423  }
   424  
   425  func (s *ProvisionerTaskSuite) TestZoneConstraintsNoDistributionGroupRetry(c *gc.C) {
   426  	ctrl := s.setUpMocks(c)
   427  	defer ctrl.Finish()
   428  
   429  	m0 := &testMachine{
   430  		id:          "0",
   431  		constraints: "zones=az1",
   432  	}
   433  	s.expectProvisioningInfo(m0)
   434  	s.taskAPI.EXPECT().MachinesWithTransientErrors().Return(
   435  		[]apiprovisioner.MachineStatusResult{{Machine: m0, Status: params.StatusResult{}}}, nil).MinTimes(1)
   436  
   437  	broker := s.setUpZonedEnviron(ctrl)
   438  	azConstraints := newAZConstraintStartInstanceParamsMatcher("az1")
   439  
   440  	failedErr := errors.Errorf("oh no")
   441  	// Use satisfaction of this call as the synchronisation point.
   442  	started := make(chan struct{})
   443  	gomock.InOrder(
    444  		broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).AnyTimes(), // may be called multiple times by the provisioner task's main loop.
    445  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(nil, failedErr),
    446  		broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).AnyTimes(), // may be called multiple times by the provisioner task's main loop.
   447  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(&environs.StartInstanceResult{
   448  			Instance: &testInstance{id: "instance-1"},
   449  		}, nil).Do(func(context.ProviderCallContext, environs.StartInstanceParams) {
   450  			go func() { started <- struct{}{} }()
   451  		}),
   452  	)
   453  
   454  	task := s.newProvisionerTaskWithBroker(c, broker, nil, numProvisionWorkersForTesting)
   455  
   456  	s.sendMachineErrorRetryChange(c)
   457  	s.sendMachineErrorRetryChange(c)
   458  
   459  	select {
   460  	case <-started:
   461  	case <-time.After(coretesting.LongWait):
   462  		c.Fatalf("no matching call to StartInstance")
   463  	}
   464  
   465  	workertest.CleanKill(c, task)
   466  }
   467  
   468  func (s *ProvisionerTaskSuite) TestZoneConstraintsWithDistributionGroup(c *gc.C) {
   469  	ctrl := s.setUpMocks(c)
   470  	defer ctrl.Finish()
   471  
   472  	m0 := &testMachine{
   473  		id:          "0",
   474  		constraints: "zones=az1,az2",
   475  	}
   476  
   477  	broker := s.setUpZonedEnviron(ctrl, m0)
   478  	azConstraints := newAZConstraintStartInstanceParamsMatcher("az1", "az2")
   479  	broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil)
   480  
   481  	// For the call to start instance, we expect the same zone constraints to
   482  	// be present, but we also expect that the zone in start instance params
   483  	// was selected from the constraints, based on a machine from the same
   484  	// distribution group already being in one of the zones.
   485  	azConstraintsAndDerivedZone := newAZConstraintStartInstanceParamsMatcher("az1", "az2")
   486  	azConstraintsAndDerivedZone.addMatch("availability zone: az2", func(p environs.StartInstanceParams) bool {
   487  		return p.AvailabilityZone == "az2"
   488  	})
   489  
   490  	// Use satisfaction of this call as the synchronisation point.
   491  	started := make(chan struct{})
   492  	broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(&environs.StartInstanceResult{
   493  		Instance: &testInstance{id: "instance-0"},
   494  	}, nil).Do(func(context.ProviderCallContext, environs.StartInstanceParams) {
   495  		go func() { started <- struct{}{} }()
   496  	})
   497  
   498  	// Another machine from the same distribution group is already in az1,
   499  	// so we expect the machine to be created in az2.
   500  	task := s.newProvisionerTaskWithBroker(c, broker, map[names.MachineTag][]string{
   501  		names.NewMachineTag("0"): {"az1"},
   502  	}, numProvisionWorkersForTesting)
   503  
   504  	s.sendModelMachinesChange(c, "0")
   505  	select {
   506  	case <-started:
   507  	case <-time.After(coretesting.LongWait):
   508  		c.Fatalf("no matching call to StartInstance")
   509  	}
   510  
   511  	workertest.CleanKill(c, task)
   512  }
   513  
   514  func (s *ProvisionerTaskSuite) TestZoneConstraintsWithDistributionGroupRetry(c *gc.C) {
   515  	ctrl := s.setUpMocks(c)
   516  	defer ctrl.Finish()
   517  
   518  	m0 := &testMachine{
   519  		id:          "0",
   520  		constraints: "zones=az1,az2",
   521  	}
   522  	s.expectProvisioningInfo(m0)
   523  	s.taskAPI.EXPECT().MachinesWithTransientErrors().Return(
   524  		[]apiprovisioner.MachineStatusResult{{Machine: m0, Status: params.StatusResult{}}}, nil).MinTimes(1)
   525  
   526  	broker := s.setUpZonedEnviron(ctrl)
   527  	azConstraints := newAZConstraintStartInstanceParamsMatcher("az1", "az2")
   528  
   529  	// Use satisfaction of this call as the synchronisation point.
   530  	failedErr := errors.Errorf("oh no")
   531  	started := make(chan struct{})
   532  	gomock.InOrder(
    533  		broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).AnyTimes(), // may be called multiple times by the provisioner task's main loop.
    534  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(nil, failedErr),
    535  		broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).AnyTimes(), // may be called multiple times by the provisioner task's main loop.
   536  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(&environs.StartInstanceResult{
   537  			Instance: &testInstance{id: "instance-1"},
   538  		}, nil).Do(func(context.ProviderCallContext, environs.StartInstanceParams) {
   539  			go func() { started <- struct{}{} }()
   540  		}),
   541  	)
   542  
   543  	// Another machine from the same distribution group is already in az1,
   544  	// so we expect the machine to be created in az2.
   545  	task := s.newProvisionerTaskWithBroker(c, broker, map[names.MachineTag][]string{
   546  		names.NewMachineTag("0"): {"az1"},
   547  	}, numProvisionWorkersForTesting)
   548  
   549  	s.sendMachineErrorRetryChange(c)
   550  	s.sendMachineErrorRetryChange(c)
   551  
   552  	select {
   553  	case <-started:
   554  	case <-time.After(coretesting.LongWait):
   555  		c.Fatalf("no matching call to StartInstance")
   556  	}
   557  
   558  	workertest.CleanKill(c, task)
   559  }
   560  
   561  func (s *ProvisionerTaskSuite) TestZoneRestrictiveConstraintsWithDistributionGroupRetry(c *gc.C) {
   562  	ctrl := s.setUpMocks(c)
   563  	defer ctrl.Finish()
   564  
   565  	m0 := &testMachine{
   566  		id:          "0",
   567  		constraints: "zones=az2",
   568  	}
   569  	s.expectProvisioningInfo(m0)
   570  	s.taskAPI.EXPECT().MachinesWithTransientErrors().Return(
   571  		[]apiprovisioner.MachineStatusResult{{Machine: m0, Status: params.StatusResult{}}}, nil).MinTimes(1)
   572  
   573  	broker := s.setUpZonedEnviron(ctrl)
   574  	azConstraints := newAZConstraintStartInstanceParamsMatcher("az2")
   575  
   576  	// Use satisfaction of this call as the synchronisation point.
   577  	failedErr := errors.Errorf("oh no")
   578  	started := make(chan struct{})
   579  	gomock.InOrder(
    580  		broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).AnyTimes(), // may be called multiple times by the provisioner task's main loop.
    581  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(nil, failedErr),
    582  		broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).AnyTimes(), // may be called multiple times by the provisioner task's main loop.
   583  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(&environs.StartInstanceResult{
   584  			Instance: &testInstance{id: "instance-2"},
   585  		}, nil).Do(func(context.ProviderCallContext, environs.StartInstanceParams) {
   586  			go func() { started <- struct{}{} }()
   587  		}),
   588  	)
   589  
    590  	// Machines from the same distribution group already occupy az2 and az3,
    591  	// but the zone constraint only permits az2, so we expect placement there.
   592  	task := s.newProvisionerTaskWithBroker(c, broker, map[names.MachineTag][]string{
   593  		names.NewMachineTag("0"): {"az2"},
   594  		names.NewMachineTag("1"): {"az3"},
   595  	}, numProvisionWorkersForTesting)
   596  
   597  	s.sendMachineErrorRetryChange(c)
   598  	s.sendMachineErrorRetryChange(c)
   599  
   600  	select {
   601  	case <-started:
   602  	case <-time.After(coretesting.LongWait):
   603  		c.Fatalf("no matching call to StartInstance")
   604  	}
   605  
   606  	workertest.CleanKill(c, task)
   607  }
   608  
   609  func (s *ProvisionerTaskSuite) TestPopulateAZMachinesErrorWorkerStopped(c *gc.C) {
   610  	ctrl := gomock.NewController(c)
   611  	defer ctrl.Finish()
   612  
   613  	broker := providermocks.NewMockZonedEnviron(ctrl)
   614  	broker.EXPECT().AllRunningInstances(s.callCtx).Return(nil, errors.New("boom")).Do(func(context.ProviderCallContext) {
   615  		go func() { close(s.setupDone) }()
   616  	})
   617  
   618  	task := s.newProvisionerTaskWithBroker(c, broker, map[names.MachineTag][]string{
   619  		names.NewMachineTag("0"): {"az1"},
   620  	}, numProvisionWorkersForTesting)
   621  	s.sendModelMachinesChange(c, "0")
   622  	s.waitForWorkerSetup(c)
   623  
   624  	err := workertest.CheckKill(c, task)
   625  	c.Assert(err, gc.ErrorMatches, "processing updated machines: getting all instances from broker: boom")
   626  }
   627  
   628  func (s *ProvisionerTaskSuite) TestDedupStopRequests(c *gc.C) {
   629  	ctrl := s.setUpMocks(c)
   630  	defer ctrl.Finish()
   631  
   632  	// m0 is a machine that should be terminated.
   633  	i0 := &testInstance{id: "0"}
   634  	s.instances = []instances.Instance{i0}
   635  	m0 := &testMachine{
   636  		id:       "0",
   637  		life:     life.Dead,
   638  		instance: i0,
   639  	}
   640  	broker := s.setUpZonedEnviron(ctrl, m0)
   641  
   642  	// This is a complex scenario. Here is how everything is set up:
   643  	//
   644  	// We will register an event processed callback as a synchronization
   645  	// point.
   646  	//
   647  	// The first machine change event will trigger a StopInstance call
   648  	// against the broker. While in that call (i.e. the machine is still
   649  	// being stopped from the provisioner's perspective), we will trigger
   650  	// another machine change event for the same machine and wait until it
   651  	// has been processed and the event processed callback invoked.
   652  	//
    653  	// Then, doneCh is closed, which instructs the test to perform a CleanKill
    654  	// of the worker and to check that no errors were reported.
   655  	//
   656  	// This verifies that machines being stopped are ignored when processing
   657  	// machine changes concurrently.
   658  
   659  	doneCh := make(chan struct{})
   660  	barrier := make(chan struct{}, 1)
   661  	var barrierCb = func(evt string) {
   662  		if evt == "processed-machines" {
   663  			barrier <- struct{}{}
   664  		}
   665  	}
   666  
   667  	// StopInstances should only be called once for m0.
   668  	broker.EXPECT().StopInstances(s.callCtx, gomock.Any()).Do(func(ctx interface{}, ids ...interface{}) {
   669  		c.Assert(len(ids), gc.Equals, 1)
   670  		c.Assert(ids[0], gc.DeepEquals, instance.Id("0"))
   671  
   672  		// While one of the pool workers is executing this code, we
   673  		// will wait until the machine change event gets processed
   674  		// and the main loop is ready to process the next event.
   675  		select {
   676  		case <-barrier:
   677  		case <-time.After(coretesting.LongWait):
   678  			c.Errorf("timed out waiting for first processed-machines event")
   679  		}
   680  
   681  		// Trigger another change while machine 0 is still being stopped
   682  		// and wait until the event has been processed by the provisioner
   683  		// main loop before returning
   684  		s.sendModelMachinesChange(c, "0")
   685  		select {
   686  		case <-barrier:
   687  		case <-time.After(coretesting.LongWait):
   688  			c.Errorf("timed out waiting for second processed-machines event")
   689  		}
   690  		close(doneCh)
   691  	})
   692  
   693  	task := s.newProvisionerTaskWithBrokerAndEventCb(c, broker, nil, numProvisionWorkersForTesting, barrierCb)
   694  
   695  	s.sendModelMachinesChange(c, "0")
   696  
    697  	// This ensures that the worker pool within the provisioner gets cleanly
    698  	// shut down and that any pending requests are processed.
   699  	select {
   700  	case <-doneCh:
   701  	case <-time.After(3 * coretesting.LongWait):
   702  		c.Errorf("timed out waiting for work to complete")
   703  	}
   704  	workertest.CleanKill(c, task)
   705  }
   706  
   707  func (s *ProvisionerTaskSuite) TestDeferStopRequestsForMachinesStillProvisioning(c *gc.C) {
   708  	ctrl := s.setUpMocks(c)
   709  	defer ctrl.Finish()
   710  
   711  	s.instances = []instances.Instance{&testInstance{id: "0"}}
   712  
   713  	// m0 is a machine that should be started.
   714  	m0 := &testMachine{
   715  		id:          "0",
   716  		life:        life.Alive,
   717  		constraints: "zones=az1",
   718  	}
   719  
   720  	broker := s.setUpZonedEnviron(ctrl, m0)
   721  
   722  	// This is a complex scenario to ensure the provisioner works as expected
   723  	// when the equivalent of "juju add-machine; juju remove-machine 0" is
   724  	// executed. Here is how everything is set up:
   725  	//
   726  	// We will register an event processed callback as a synchronization
   727  	// point.
   728  	//
    729  	// Machine 0 is alive but not yet started. Processing the first machine
   730  	// change will trigger a StartInstance call against the broker.  While
   731  	// in that call (i.e. the machine is still being started from the
   732  	// provisioner's perspective), we will set the machine as dead, queue a
   733  	// change event for the same machine and wait until it has been
   734  	// processed and the event processed callback invoked.
   735  	//
   736  	// The change event for the dead machine should not immediately trigger
   737  	// a StopInstance call but rather the provisioner will detect that the
   738  	// machine is still being started and defer the stopping of the machine
   739  	// until the machine gets started (immediately when StartInstance
   740  	// returns).
   741  	//
    742  	// Finally, doneCh is closed, which instructs the test to perform a
    743  	// CleanKill of the worker and to check that no errors were reported.
   744  
   745  	doneCh := make(chan struct{})
   746  	barrier := make(chan struct{}, 1)
   747  	var barrierCb = func(evt string) {
   748  		if evt == "processed-machines" {
   749  			barrier <- struct{}{}
   750  		}
   751  	}
   752  
   753  	azConstraints := newAZConstraintStartInstanceParamsMatcher("az1")
   754  	broker.EXPECT().DeriveAvailabilityZones(s.callCtx, azConstraints).Return([]string{}, nil).AnyTimes()
   755  	gomock.InOrder(
   756  		broker.EXPECT().StartInstance(s.callCtx, azConstraints).Return(&environs.StartInstanceResult{
   757  			Instance: &testInstance{id: "instance-0"},
   758  		}, nil).Do(func(ctx, params interface{}) {
   759  			// While one of the pool workers is executing this code, we
   760  			// will wait until the machine change event gets processed
   761  			// and the main loop is ready to process the next event.
   762  			select {
   763  			case <-barrier:
   764  			case <-time.After(coretesting.LongWait):
   765  				c.Errorf("timed out waiting for first processed-machines event")
   766  			}
   767  
   768  			// While the machine is still starting, flag it as dead,
   769  			// trigger another change and wait for it to be processed.
   770  			// We expect that the defer stop flag is going to be set for
   771  			// the machine and a StopInstance call to be issued once we
   772  			// return.
   773  			m0.life = life.Dead
   774  			s.sendModelMachinesChange(c, "0")
   775  			select {
   776  			case <-barrier:
   777  			case <-time.After(coretesting.LongWait):
   778  				c.Errorf("timed out waiting for second processed-machines event")
   779  			}
   780  		}),
   781  		broker.EXPECT().StopInstances(s.callCtx, gomock.Any()).Do(func(ctx interface{}, ids ...interface{}) {
   782  			c.Assert(len(ids), gc.Equals, 1)
   783  			c.Assert(ids[0], gc.DeepEquals, instance.Id("0"))
   784  
   785  			// Signal the test to shut down the worker.
   786  			close(doneCh)
   787  		}),
   788  	)
   789  
   790  	task := s.newProvisionerTaskWithBrokerAndEventCb(c, broker, nil, numProvisionWorkersForTesting, barrierCb)
   791  
   792  	// Send change for machine 0
   793  	s.sendModelMachinesChange(c, "0")
   794  
    795  	// This ensures that the worker pool within the provisioner gets cleanly
    796  	// shut down and that any pending requests are processed.
   797  	select {
   798  	case <-doneCh:
   799  	case <-time.After(3 * coretesting.LongWait):
   800  		c.Errorf("timed out waiting for work to complete")
   801  	}
   802  	workertest.CleanKill(c, task)
   803  }
   804  
   805  func (s *ProvisionerTaskSuite) TestResizeWorkerPool(c *gc.C) {
   806  	ctrl := gomock.NewController(c)
   807  	defer ctrl.Finish()
   808  
   809  	barrier := make(chan struct{}, 1)
   810  	var barrierCb = func(evt string) {
   811  		if evt == "resized-worker-pool" {
   812  			close(barrier)
   813  		}
   814  	}
   815  
   816  	broker := s.setUpZonedEnviron(ctrl)
   817  	task := s.newProvisionerTaskWithBrokerAndEventCb(c, broker, nil, numProvisionWorkersForTesting, barrierCb)
   818  
   819  	// Resize the pool
   820  	task.SetNumProvisionWorkers(numProvisionWorkersForTesting + 1)
   821  
   822  	<-barrier
   823  	workertest.CleanKill(c, task)
   824  }
   825  
   826  func (s *ProvisionerTaskSuite) TestUpdatedZonesReflectedInAZMachineSlice(c *gc.C) {
   827  	ctrl := s.setUpMocks(c)
   828  	defer ctrl.Finish()
   829  
   830  	s.instances = []instances.Instance{&testInstance{id: "0"}}
   831  	m0 := &testMachine{id: "0", life: life.Alive}
   832  	s.expectMachines(m0)
   833  	s.expectProvisioningInfo(m0)
   834  
   835  	broker := providermocks.NewMockZonedEnviron(ctrl)
   836  	exp := broker.EXPECT()
   837  
   838  	exp.AllRunningInstances(s.callCtx).Return(s.instances, nil).MinTimes(1)
   839  	exp.InstanceAvailabilityZoneNames(s.callCtx, []instance.Id{s.instances[0].Id()}).Return(
   840  		map[instance.Id]string{}, nil).Do(func(context.ProviderCallContext, []instance.Id) { close(s.setupDone) })
   841  
   842  	az1 := providermocks.NewMockAvailabilityZone(ctrl)
   843  	az1.EXPECT().Name().Return("az1").MinTimes(1)
   844  	az1.EXPECT().Available().Return(true).MinTimes(1)
   845  
   846  	az2 := providermocks.NewMockAvailabilityZone(ctrl)
   847  	az2.EXPECT().Name().Return("az2").MinTimes(1)
   848  	az2.EXPECT().Available().Return(true).MinTimes(1)
   849  
   850  	az3 := providermocks.NewMockAvailabilityZone(ctrl)
   851  	az3.EXPECT().Name().Return("az3").MinTimes(1)
   852  	az3.EXPECT().Available().Return(true).MinTimes(1)
   853  
    854  	// Return 1 availability zone on the first call, then 3, then 1 again.
    855  	// See the steps below, each punctuated by sending a machine change.
   856  	gomock.InOrder(
   857  		exp.AvailabilityZones(s.callCtx).Return(network.AvailabilityZones{az1}, nil),
   858  		exp.AvailabilityZones(s.callCtx).Return(network.AvailabilityZones{az1, az2, az3}, nil),
   859  		exp.AvailabilityZones(s.callCtx).Return(network.AvailabilityZones{az1}, nil),
   860  	)
   861  
   862  	step := make(chan struct{}, 1)
   863  
   864  	// We really don't care about these calls.
   865  	// StartInstance is just a synchronisation point.
   866  	exp.DeriveAvailabilityZones(s.callCtx, gomock.Any()).Return([]string{}, nil).AnyTimes()
   867  	exp.StartInstance(s.callCtx, gomock.Any()).Return(&environs.StartInstanceResult{
   868  		Instance: &testInstance{id: "instance-0"},
   869  	}, nil).AnyTimes().Do(func(context.ProviderCallContext, environs.StartInstanceParams) {
   870  		select {
   871  		case step <- struct{}{}:
   872  		case <-time.After(testing.LongWait):
   873  			c.Fatalf("timed out writing to step channel")
   874  		}
   875  	})
   876  
   877  	task := s.newProvisionerTaskWithBroker(c, broker, nil, numProvisionWorkersForTesting)
   878  
   879  	syncStep := func() {
   880  		select {
   881  		case <-step:
   882  		case <-time.After(testing.LongWait):
   883  			c.Fatalf("timed out reading from step channel")
   884  		}
   885  	}
   886  
   887  	s.sendModelMachinesChange(c, "0")
   888  
   889  	// After the first change, there is only one AZ in the tracker.
   890  	syncStep()
   891  	azm := provisioner.GetCopyAvailabilityZoneMachines(task)
   892  	c.Assert(azm, gc.HasLen, 1)
   893  	c.Assert(azm[0].ZoneName, gc.Equals, "az1")
   894  
   895  	s.sendModelMachinesChange(c, "0")
   896  
   897  	// After the second change, we see all 3 AZs.
   898  	syncStep()
   899  	azm = provisioner.GetCopyAvailabilityZoneMachines(task)
   900  	c.Assert(azm, gc.HasLen, 3)
   901  	c.Assert([]string{azm[0].ZoneName, azm[1].ZoneName, azm[2].ZoneName}, jc.SameContents, []string{"az1", "az2", "az3"})
   902  
   903  	s.sendModelMachinesChange(c, "0")
   904  
    905  	// At this point, we will have had a deployment to one of the zones added
    906  	// in the prior step. When the zone list shrinks back to az1, the zone we
    907  	// deployed into is retained (it still hosts a machine), so only one is removed.
   908  	syncStep()
   909  	azm = provisioner.GetCopyAvailabilityZoneMachines(task)
   910  	c.Assert(azm, gc.HasLen, 2)
   911  
   912  	workertest.CleanKill(c, task)
   913  }
   914  
    915  // setUpZonedEnviron creates a mock environ with 3 availability zones and
    916  // instances based on those set on the suite; if no machines are given, the bare mock is returned with no expectations set.
   917  func (s *ProvisionerTaskSuite) setUpZonedEnviron(ctrl *gomock.Controller, machines ...*testMachine) *providermocks.MockZonedEnviron {
   918  	broker := providermocks.NewMockZonedEnviron(ctrl)
   919  	if len(machines) == 0 {
   920  		return broker
   921  	}
   922  
   923  	s.expectMachines(machines...)
   924  	s.expectProvisioningInfo(machines...)
   925  
   926  	instanceIds := make([]instance.Id, len(s.instances))
   927  	for i, inst := range s.instances {
   928  		instanceIds[i] = inst.Id()
   929  	}
   930  
   931  	// Environ has 3 availability zones: az1, az2, az3.
   932  	zones := make(network.AvailabilityZones, 3)
   933  	for i := 0; i < 3; i++ {
   934  		az := providermocks.NewMockAvailabilityZone(ctrl)
   935  		az.EXPECT().Name().Return(fmt.Sprintf("az%d", i+1)).MinTimes(1)
   936  		az.EXPECT().Available().Return(true).MinTimes(1)
   937  		zones[i] = az
   938  	}
   939  
   940  	exp := broker.EXPECT()
   941  	exp.AllRunningInstances(s.callCtx).Return(s.instances, nil).MinTimes(1)
   942  	exp.InstanceAvailabilityZoneNames(s.callCtx, instanceIds).Return(map[instance.Id]string{}, nil).Do(
   943  		func(context.ProviderCallContext, []instance.Id) { close(s.setupDone) },
   944  	)
   945  	exp.AvailabilityZones(s.callCtx).Return(zones, nil).MinTimes(1)
   946  	return broker
   947  }
   948  
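         // waitForWorkerSetup blocks until the environ set-up expectations have been
         // satisfied (signalled via setupDone), or fails the test after LongWait.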
   949  func (s *ProvisionerTaskSuite) waitForWorkerSetup(c *gc.C) {
   950  	select {
   951  	case <-s.setupDone:
   952  	case <-time.After(coretesting.LongWait):
   953  		c.Fatalf("worker not set up")
   954  	}
   955  }
   956  
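         // waitForTask consumes the calls recorded by the stub instance broker until
         // they exactly match expectedCalls, failing the test if a call does not
         // arrive within LongWait.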
   957  func (s *ProvisionerTaskSuite) waitForTask(c *gc.C, expectedCalls []string) {
   958  	var calls []string
   959  	for {
   960  		select {
   961  		case call := <-s.instanceBroker.callsChan:
   962  			calls = append(calls, call)
   963  		case <-time.After(coretesting.LongWait):
    964  			c.Fatalf("timed out waiting for expected calls %v; got %v", expectedCalls, calls)
   965  		}
   966  		if reflect.DeepEqual(expectedCalls, calls) {
   967  			// we are done
   968  			break
   969  		}
   970  	}
   971  }
   972  
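         // sendModelMachinesChange delivers a change for the given machine ids to the
         // mock model machines watcher, failing the test on timeout.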
   973  func (s *ProvisionerTaskSuite) sendModelMachinesChange(c *gc.C, ids ...string) {
   974  	select {
   975  	case s.modelMachinesChanges <- ids:
   976  	case <-time.After(coretesting.LongWait):
   977  		c.Fatal("timed out sending model machines change")
   978  	}
   979  }
   980  
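         // sendMachineErrorRetryChange signals the mock machine error retry watcher,
         // failing the test on timeout.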
   981  func (s *ProvisionerTaskSuite) sendMachineErrorRetryChange(c *gc.C) {
   982  	select {
   983  	case s.machineErrorRetryChanges <- struct{}{}:
   984  	case <-time.After(coretesting.LongWait):
   985  		c.Fatal("timed out sending machine error retry change")
   986  	}
   987  }
   988  
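         // newProvisionerTask returns a provisioner task backed by the suite's stub
         // instance broker, configured with no retries on failed StartInstance calls.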
   989  func (s *ProvisionerTaskSuite) newProvisionerTask(
   990  	c *gc.C,
   991  	harvestingMethod config.HarvestMode,
   992  	distributionGroupFinder provisioner.DistributionGroupFinder,
   993  	toolsFinder provisioner.ToolsFinder,
   994  	numProvisionWorkers int,
   995  ) provisioner.ProvisionerTask {
   996  	return s.newProvisionerTaskWithRetry(c,
   997  		harvestingMethod,
   998  		distributionGroupFinder,
   999  		toolsFinder,
  1000  		provisioner.NewRetryStrategy(0*time.Second, 0),
  1001  		numProvisionWorkers,
  1002  	)
  1003  }
  1004  
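         // newProvisionerTaskWithRetry is like newProvisionerTask but lets the caller
         // specify the retry strategy used for failed StartInstance calls.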
  1005  func (s *ProvisionerTaskSuite) newProvisionerTaskWithRetry(
  1006  	c *gc.C,
  1007  	harvestingMethod config.HarvestMode,
  1008  	distributionGroupFinder provisioner.DistributionGroupFinder,
  1009  	toolsFinder provisioner.ToolsFinder,
  1010  	retryStrategy provisioner.RetryStrategy,
  1011  	numProvisionWorkers int,
  1012  ) provisioner.ProvisionerTask {
  1013  	w, err := provisioner.NewProvisionerTask(provisioner.TaskConfig{
  1014  		ControllerUUID:             coretesting.ControllerTag.Id(),
  1015  		HostTag:                    names.NewMachineTag("0"),
  1016  		Logger:                     loggo.GetLogger("test"),
  1017  		HarvestMode:                harvestingMethod,
  1018  		TaskAPI:                    s.taskAPI,
  1019  		DistributionGroupFinder:    distributionGroupFinder,
  1020  		ToolsFinder:                toolsFinder,
  1021  		MachineWatcher:             s.modelMachinesWatcher,
  1022  		RetryWatcher:               s.machineErrorRetryWatcher,
  1023  		Broker:                     s.instanceBroker,
  1024  		Auth:                       s.auth,
  1025  		ImageStream:                imagemetadata.ReleasedStream,
  1026  		RetryStartInstanceStrategy: retryStrategy,
  1027  		CloudCallContextFunc:       func(_ stdcontext.Context) context.ProviderCallContext { return s.callCtx },
  1028  		NumProvisionWorkers:        numProvisionWorkers,
  1029  	})
  1030  	c.Assert(err, jc.ErrorIsNil)
  1031  	return w
  1032  }
  1033  
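         // newProvisionerTaskWithBroker is a convenience wrapper around
         // newProvisionerTaskWithBrokerAndEventCb with no event-processed callback.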
  1034  func (s *ProvisionerTaskSuite) newProvisionerTaskWithBroker(c *gc.C, broker environs.InstanceBroker, distributionGroups map[names.MachineTag][]string, numProvisionWorkers int) provisioner.ProvisionerTask {
  1035  	return s.newProvisionerTaskWithBrokerAndEventCb(c, broker, distributionGroups, numProvisionWorkers, nil)
  1036  }
  1037  
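         // newProvisionerTaskWithBrokerAndEventCb builds a provisioner task around the
         // supplied broker and distribution groups, with an optional callback that is
         // invoked as events are processed (used by tests as a synchronisation barrier).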
  1038  func (s *ProvisionerTaskSuite) newProvisionerTaskWithBrokerAndEventCb(
  1039  	c *gc.C,
  1040  	broker environs.InstanceBroker,
  1041  	distributionGroups map[names.MachineTag][]string,
  1042  	numProvisionWorkers int,
  1043  	evtCb func(string),
  1044  ) provisioner.ProvisionerTask {
  1045  	task, err := provisioner.NewProvisionerTask(provisioner.TaskConfig{
  1046  		ControllerUUID:             coretesting.ControllerTag.Id(),
  1047  		HostTag:                    names.NewMachineTag("0"),
  1048  		Logger:                     loggo.GetLogger("test"),
  1049  		HarvestMode:                config.HarvestAll,
  1050  		TaskAPI:                    s.taskAPI,
  1051  		DistributionGroupFinder:    &mockDistributionGroupFinder{groups: distributionGroups},
  1052  		ToolsFinder:                mockToolsFinder{},
  1053  		MachineWatcher:             s.modelMachinesWatcher,
  1054  		RetryWatcher:               s.machineErrorRetryWatcher,
  1055  		Broker:                     broker,
  1056  		Auth:                       s.auth,
  1057  		ImageStream:                imagemetadata.ReleasedStream,
  1058  		RetryStartInstanceStrategy: provisioner.NewRetryStrategy(0*time.Second, 0),
  1059  		CloudCallContextFunc:       func(_ stdcontext.Context) context.ProviderCallContext { return s.callCtx },
  1060  		NumProvisionWorkers:        numProvisionWorkers,
  1061  		EventProcessedCb:           evtCb,
  1062  	})
  1063  	c.Assert(err, jc.ErrorIsNil)
  1064  	return task
  1065  }
  1066  
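         // setUpMocks creates the gomock controller and the mock task API used by the
         // suite; callers must defer Finish on the returned controller.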
  1067  func (s *ProvisionerTaskSuite) setUpMocks(c *gc.C) *gomock.Controller {
  1068  	ctrl := gomock.NewController(c)
  1069  	s.taskAPI = mocks.NewMockTaskAPI(ctrl)
  1070  	return ctrl
  1071  }
  1072  
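         // expectMachines registers the expectation that the task API will be asked
         // for exactly these machines, returning a successful result for each.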
  1073  func (s *ProvisionerTaskSuite) expectMachines(machines ...*testMachine) {
  1074  	tags := transform.Slice(machines, func(m *testMachine) names.MachineTag {
  1075  		return names.NewMachineTag(m.id)
  1076  	})
  1077  
  1078  	mResults := transform.Slice(machines, func(m *testMachine) apiprovisioner.MachineResult {
  1079  		return apiprovisioner.MachineResult{Machine: m}
  1080  	})
  1081  
  1082  	s.taskAPI.EXPECT().Machines(tags).Return(mResults, nil).MinTimes(1)
  1083  }
  1084  
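         // expectProvisioningInfo registers the expectation that provisioning info may
         // be requested for these machines, with results derived from each machine's
         // constraints and network topology.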
  1085  func (s *ProvisionerTaskSuite) expectProvisioningInfo(machines ...*testMachine) {
  1086  	tags := transform.Slice(machines, func(m *testMachine) names.MachineTag {
  1087  		return names.NewMachineTag(m.id)
  1088  	})
  1089  
  1090  	base, _ := corebase.GetBaseFromSeries(jujuversion.DefaultSupportedLTS())
  1091  
  1092  	piResults := transform.Slice(machines, func(m *testMachine) params.ProvisioningInfoResult {
  1093  		return params.ProvisioningInfoResult{
  1094  			Result: &params.ProvisioningInfo{
  1095  				ControllerConfig:            coretesting.FakeControllerConfig(),
  1096  				Base:                        params.Base{Name: base.OS, Channel: base.Channel.String()},
  1097  				Constraints:                 constraints.MustParse(m.constraints),
  1098  				ProvisioningNetworkTopology: m.topology,
  1099  			},
  1100  			Error: nil,
  1101  		}
  1102  	})
  1103  
  1104  	s.taskAPI.EXPECT().ProvisioningInfo(tags).Return(
  1105  		params.ProvisioningInfoResults{Results: piResults}, nil).AnyTimes()
  1106  }
  1107  
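         // testInstanceBroker is a stub environs.InstanceBroker that records calls on
         // its embedded Stub and publishes each call name on callsChan so that tests
         // can synchronise with broker activity.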
  1108  type testInstanceBroker struct {
  1109  	*testing.Stub
  1110  
  1111  	callsChan        chan string
  1112  	allInstancesFunc func(ctx context.ProviderCallContext) ([]instances.Instance, error)
  1113  }
  1114  
  1115  func (t *testInstanceBroker) StartInstance(ctx context.ProviderCallContext, args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
  1116  	t.AddCall("StartInstance", ctx, args)
  1117  	t.callsChan <- "StartInstance"
  1118  	return nil, t.NextErr()
  1119  }
  1120  
  1121  func (t *testInstanceBroker) StopInstances(ctx context.ProviderCallContext, ids ...instance.Id) error {
  1122  	t.AddCall("StopInstances", ctx, ids)
  1123  	t.callsChan <- "StopInstances"
  1124  	return t.NextErr()
  1125  }
  1126  
  1127  func (t *testInstanceBroker) AllInstances(ctx context.ProviderCallContext) ([]instances.Instance, error) {
  1128  	t.AddCall("AllInstances", ctx)
  1129  	t.callsChan <- "AllInstances"
  1130  	return t.allInstancesFunc(ctx)
  1131  }
  1132  
  1133  func (t *testInstanceBroker) AllRunningInstances(ctx context.ProviderCallContext) ([]instances.Instance, error) {
  1134  	t.AddCall("AllRunningInstances", ctx)
  1135  	t.callsChan <- "AllRunningInstances"
  1136  	return t.allInstancesFunc(ctx)
  1137  }
  1138  
  1139  type testInstance struct {
  1140  	instances.Instance
  1141  	id string
  1142  }
  1143  
  1144  func (i *testInstance) Id() instance.Id {
  1145  	return instance.Id(i.id)
  1146  }
  1147  
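         // testMachine is a minimal in-memory machine implementation for the task API.
         // The mutex guards the status message fields, which are written from worker
         // goroutines.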
  1148  type testMachine struct {
  1149  	*apiprovisioner.Machine
  1150  
  1151  	mu sync.Mutex
  1152  
  1153  	id             string
  1154  	life           life.Value
  1155  	instance       *testInstance
  1156  	keepInstance   bool
  1157  	markForRemoval bool
  1158  	constraints    string
  1159  	instStatusMsg  string
  1160  	modStatusMsg   string
  1161  	topology       params.ProvisioningNetworkTopology
  1162  }
  1163  
  1164  func (m *testMachine) Id() string {
  1165  	return m.id
  1166  }
  1167  
  1168  func (m *testMachine) String() string {
  1169  	return m.Id()
  1170  }
  1171  
  1172  func (m *testMachine) Life() life.Value {
  1173  	return m.life
  1174  }
  1175  
  1176  func (m *testMachine) InstanceId() (instance.Id, error) {
  1177  	if m.instance == nil {
  1178  		return "", params.Error{Code: "not provisioned"}
  1179  	}
  1180  	return m.instance.Id(), nil
  1181  }
  1182  
  1183  func (m *testMachine) InstanceNames() (instance.Id, string, error) {
  1184  	instId, err := m.InstanceId()
  1185  	return instId, "", err
  1186  }
  1187  
  1188  func (m *testMachine) KeepInstance() (bool, error) {
  1189  	return m.keepInstance, nil
  1190  }
  1191  
  1192  func (m *testMachine) MarkForRemoval() error {
  1193  	m.markForRemoval = true
  1194  	return nil
  1195  }
  1196  
  1197  func (m *testMachine) Tag() names.Tag {
  1198  	return m.MachineTag()
  1199  }
  1200  
  1201  func (m *testMachine) MachineTag() names.MachineTag {
  1202  	return names.NewMachineTag(m.id)
  1203  }
  1204  
  1205  func (m *testMachine) SetInstanceStatus(_ status.Status, message string, _ map[string]interface{}) error {
  1206  	m.mu.Lock()
  1207  	m.instStatusMsg = message
  1208  	m.mu.Unlock()
  1209  	return nil
  1210  }
  1211  
  1212  func (m *testMachine) InstanceStatus() (status.Status, string, error) {
  1213  	m.mu.Lock()
  1214  	defer m.mu.Unlock()
  1215  	return "", m.instStatusMsg, nil
  1216  }
  1217  
  1218  func (m *testMachine) SetModificationStatus(_ status.Status, message string, _ map[string]interface{}) error {
  1219  	m.mu.Lock()
  1220  	m.modStatusMsg = message
  1221  	m.mu.Unlock()
  1222  	return nil
  1223  }
  1224  
  1225  func (m *testMachine) ModificationStatus() (status.Status, string, error) {
  1226  	m.mu.Lock()
  1227  	defer m.mu.Unlock()
  1228  	return "", m.modStatusMsg, nil
  1229  }
  1230  
  1231  func (m *testMachine) SetStatus(_ status.Status, _ string, _ map[string]interface{}) error {
  1232  	return nil
  1233  }
  1234  
  1235  func (m *testMachine) Status() (status.Status, string, error) {
  1236  	return "pending", "", nil
  1237  }
  1238  
  1239  func (m *testMachine) ModelAgentVersion() (*version.Number, error) {
  1240  	return &coretesting.FakeVersionNumber, nil
  1241  }
  1242  
  1243  func (m *testMachine) SetInstanceInfo(
  1244  	_ instance.Id, _ string, _ string, _ *instance.HardwareCharacteristics, _ []params.NetworkConfig, _ []params.Volume,
  1245  	_ map[string]params.VolumeAttachmentInfo, _ []string,
  1246  ) error {
  1247  	return nil
  1248  }
  1249  
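         // testAuthenticationProvider records SetupAuthentication calls on its Stub
         // and returns nil API connection info.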
  1250  type testAuthenticationProvider struct {
  1251  	*testing.Stub
  1252  }
  1253  
  1254  func (m *testAuthenticationProvider) SetupAuthentication(
  1255  	machine authentication.TaggedPasswordChanger,
  1256  ) (*api.Info, error) {
  1257  	m.AddCall("SetupAuthentication", machine)
  1258  	return nil, nil
  1259  }
  1260  
  1261  // startInstanceParamsMatcher is a GoMock matcher that applies a collection of
  1262  // conditions to an environs.StartInstanceParams.
  1263  // All conditions must be true in order for a positive match.
  1264  type startInstanceParamsMatcher struct {
  1265  	matchers map[string]func(environs.StartInstanceParams) bool
  1266  	failMsg  string
  1267  }
  1268  
  1269  func (m *startInstanceParamsMatcher) Matches(params interface{}) bool {
  1270  	siParams := params.(environs.StartInstanceParams)
  1271  	for msg, match := range m.matchers {
  1272  		if !match(siParams) {
  1273  			m.failMsg = msg
  1274  			return false
  1275  		}
  1276  	}
  1277  	return true
  1278  }
  1279  
  1280  func (m *startInstanceParamsMatcher) String() string {
  1281  	return m.failMsg
  1282  }
  1283  
  1284  func (m *startInstanceParamsMatcher) addMatch(msg string, match func(environs.StartInstanceParams) bool) {
  1285  	m.matchers[msg] = match
  1286  }
  1287  
  1288  // newAZConstraintStartInstanceParamsMatcher returns a matcher that tests
  1289  // whether the candidate environs.StartInstanceParams has a constraints value
  1290  // that includes exactly the input zones.
  1291  func newAZConstraintStartInstanceParamsMatcher(zones ...string) *startInstanceParamsMatcher {
  1292  	match := func(p environs.StartInstanceParams) bool {
  1293  		if !p.Constraints.HasZones() {
  1294  			return len(zones) == 0
  1295  		}
  1296  		cZones := *p.Constraints.Zones
  1297  		if len(cZones) != len(zones) {
  1298  			return false
  1299  		}
  1300  		for _, z := range zones {
  1301  			found := false
  1302  			for _, cz := range cZones {
  1303  				if z == cz {
  1304  					found = true
  1305  					break
  1306  				}
  1307  			}
  1308  			if !found {
  1309  				return false
  1310  			}
  1311  		}
  1312  		return true
  1313  	}
  1314  	return newStartInstanceParamsMatcher(map[string]func(environs.StartInstanceParams) bool{
   1315  		fmt.Sprint("AZ constraints: ", strings.Join(zones, ", ")): match,
  1316  	})
  1317  }
  1318  
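         // newSpaceConstraintStartInstanceParamsMatcher returns a matcher that tests
         // whether the candidate environs.StartInstanceParams has a constraints value
         // that includes exactly the input spaces.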
  1319  func newSpaceConstraintStartInstanceParamsMatcher(spaces ...string) *startInstanceParamsMatcher {
  1320  	match := func(p environs.StartInstanceParams) bool {
  1321  		if !p.Constraints.HasSpaces() {
  1322  			return false
  1323  		}
  1324  		cSpaces := p.Constraints.IncludeSpaces()
  1325  		if len(cSpaces) != len(spaces) {
  1326  			return false
  1327  		}
  1328  		for _, s := range spaces {
  1329  			found := false
   1330  		for _, cs := range cSpaces {
  1331  				if s == cs {
  1332  					found = true
  1333  					break
  1334  				}
  1335  			}
  1336  			if !found {
  1337  				return false
  1338  			}
  1339  		}
  1340  		return true
  1341  	}
  1342  	return newStartInstanceParamsMatcher(map[string]func(environs.StartInstanceParams) bool{
   1343  		fmt.Sprint("space constraints: ", strings.Join(spaces, ", ")): match,
  1344  	})
  1345  }
  1346  
  1347  func newStartInstanceParamsMatcher(
  1348  	matchers map[string]func(environs.StartInstanceParams) bool,
  1349  ) *startInstanceParamsMatcher {
  1350  	if matchers == nil {
  1351  		matchers = make(map[string]func(environs.StartInstanceParams) bool)
  1352  	}
  1353  	return &startInstanceParamsMatcher{matchers: matchers}
  1354  }