github.com/juju/juju@v0.0.0-20240430160146-1752b71fcf00/worker/instancepoller/worker_test.go

// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package instancepoller

import (
	"fmt"
	"time"

	"github.com/juju/clock/testclock"
	"github.com/juju/errors"
	"github.com/juju/loggo"
	"github.com/juju/names/v5"
	jc "github.com/juju/testing/checkers"
	"github.com/juju/worker/v3"
	"github.com/juju/worker/v3/workertest"
	"go.uber.org/mock/gomock"
	gc "gopkg.in/check.v1"

	apiservererrors "github.com/juju/juju/apiserver/errors"
	"github.com/juju/juju/core/instance"
	"github.com/juju/juju/core/life"
	"github.com/juju/juju/core/network"
	"github.com/juju/juju/core/status"
	"github.com/juju/juju/core/watcher"
	"github.com/juju/juju/environs"
	"github.com/juju/juju/environs/instances"
	"github.com/juju/juju/rpc/params"
	coretesting "github.com/juju/juju/testing"
	"github.com/juju/juju/worker/instancepoller/mocks"
)

var (
	_ = gc.Suite(&configSuite{})
	_ = gc.Suite(&pollGroupEntrySuite{})
	_ = gc.Suite(&workerSuite{})

	testAddrs = network.ProviderAddresses{
		network.NewMachineAddress(
			"10.0.0.1", network.WithCIDR("10.0.0.0/24"), network.WithScope(network.ScopeCloudLocal),
		).AsProviderAddress(),
		network.NewMachineAddress(
			"1.1.1.42", network.WithCIDR("1.1.1.0/24"), network.WithScope(network.ScopePublic),
		).AsProviderAddress(),
	}

	testNetIfs = network.InterfaceInfos{
		{
			DeviceIndex:   0,
			InterfaceName: "eth0",
			MACAddress:    "de:ad:be:ef:00:00",
			Addresses: network.ProviderAddresses{
				network.NewMachineAddress(
					"10.0.0.1", network.WithCIDR("10.0.0.0/24"), network.WithScope(network.ScopeCloudLocal),
				).AsProviderAddress(),
			},
			ShadowAddresses: network.ProviderAddresses{
				network.NewMachineAddress(
					"1.1.1.42", network.WithCIDR("1.1.1.0/24"), network.WithScope(network.ScopePublic),
				).AsProviderAddress(),
			},
		},
	}
)

type configSuite struct{}

func (s *configSuite) TestConfigValidation(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	origCfg := Config{
		Clock:         testclock.NewClock(time.Now()),
		Facade:        newMockFacadeAPI(ctrl, nil),
		Environ:       mocks.NewMockEnviron(ctrl),
		Logger:        loggo.GetLogger("juju.worker.instancepoller"),
		CredentialAPI: mocks.NewMockCredentialAPI(ctrl),
	}
	c.Assert(origCfg.Validate(), jc.ErrorIsNil)

	testCfg := origCfg
	testCfg.Clock = nil
	c.Assert(testCfg.Validate(), gc.ErrorMatches, "nil clock.Clock.*")

	testCfg = origCfg
	testCfg.Facade = nil
	c.Assert(testCfg.Validate(), gc.ErrorMatches, "nil Facade.*")

	testCfg = origCfg
	testCfg.Environ = nil
	c.Assert(testCfg.Validate(), gc.ErrorMatches, "nil Environ.*")

	testCfg = origCfg
	testCfg.Logger = nil
	c.Assert(testCfg.Validate(), gc.ErrorMatches, "nil Logger.*")

	testCfg = origCfg
	testCfg.CredentialAPI = nil
	c.Assert(testCfg.Validate(), gc.ErrorMatches, "nil CredentialAPI.*")
}
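
// TestConfigValidationTableDriven is an illustrative, editor-added sketch of
// the same checks in table-driven form. It assumes nothing beyond the Config
// fields and error messages already exercised in TestConfigValidation above.
func (s *configSuite) TestConfigValidationTableDriven(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	// newValidCfg builds a fully-populated Config, mirroring origCfg above.
	newValidCfg := func() Config {
		return Config{
			Clock:         testclock.NewClock(time.Now()),
			Facade:        newMockFacadeAPI(ctrl, nil),
			Environ:       mocks.NewMockEnviron(ctrl),
			Logger:        loggo.GetLogger("juju.worker.instancepoller"),
			CredentialAPI: mocks.NewMockCredentialAPI(ctrl),
		}
	}
	c.Assert(newValidCfg().Validate(), jc.ErrorIsNil)

	specs := []struct {
		descr    string
		mutate   func(*Config)
		expected string
	}{
		{"missing clock", func(cfg *Config) { cfg.Clock = nil }, "nil clock.Clock.*"},
		{"missing facade", func(cfg *Config) { cfg.Facade = nil }, "nil Facade.*"},
		{"missing environ", func(cfg *Config) { cfg.Environ = nil }, "nil Environ.*"},
		{"missing logger", func(cfg *Config) { cfg.Logger = nil }, "nil Logger.*"},
		{"missing credential API", func(cfg *Config) { cfg.CredentialAPI = nil }, "nil CredentialAPI.*"},
	}
	for i, spec := range specs {
		c.Logf("test %d: %s", i, spec.descr)
		cfg := newValidCfg()
		spec.mutate(&cfg)
		c.Assert(cfg.Validate(), gc.ErrorMatches, spec.expected)
	}
}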

type pollGroupEntrySuite struct{}

func (s *pollGroupEntrySuite) TestShortPollIntervalLogic(c *gc.C) {
	clock := testclock.NewClock(time.Now())
	entry := new(pollGroupEntry)

	// Test reset logic.
	entry.resetShortPollInterval(clock)
	c.Assert(entry.shortPollInterval, gc.Equals, ShortPoll)
	c.Assert(entry.shortPollAt, gc.Equals, clock.Now().Add(ShortPoll))

	// Ensure that repeatedly bumping the short poll interval eventually
	// caps out at ShortPollCap.
	for i := 0; entry.shortPollInterval < LongPoll && i < 100; i++ {
		entry.bumpShortPollInterval(clock)
	}
	c.Assert(entry.shortPollInterval, gc.Equals, ShortPollCap, gc.Commentf(
		"short poll interval did not reach short poll cap interval after 100 interval bumps"))

	// Check that once we reach the short poll cap interval we stay capped at it.
	entry.bumpShortPollInterval(clock)
	c.Assert(entry.shortPollInterval, gc.Equals, ShortPollCap, gc.Commentf(
		"short poll should have been capped at the short poll cap interval"))
	c.Assert(entry.shortPollAt, gc.Equals, clock.Now().Add(ShortPollCap))
}
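
// TestResetAfterBumpRestoresShortPollInterval is an illustrative, editor-added
// companion sketch. It assumes only the resetShortPollInterval and
// bumpShortPollInterval helpers exercised above, and checks that a reset
// always returns the entry to the base ShortPoll interval no matter how far
// the interval was previously bumped.
func (s *pollGroupEntrySuite) TestResetAfterBumpRestoresShortPollInterval(c *gc.C) {
	clock := testclock.NewClock(time.Now())
	entry := new(pollGroupEntry)

	// Bump the interval a few times from its base value.
	entry.resetShortPollInterval(clock)
	for i := 0; i < 3; i++ {
		entry.bumpShortPollInterval(clock)
	}

	// A reset must restore the base interval and reschedule the next poll.
	entry.resetShortPollInterval(clock)
	c.Assert(entry.shortPollInterval, gc.Equals, ShortPoll)
	c.Assert(entry.shortPollAt, gc.Equals, clock.Now().Add(ShortPoll))
}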

type workerSuite struct{}

func (s *workerSuite) TestQueueingNewMachineAddsItToShortPollGroup(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	// Instance poller will look up machine with id "0" and get back a
	// non-manual machine.
	machineTag := names.NewMachineTag("0")
	nonManualMachine := mocks.NewMockMachine(ctrl)
	nonManualMachine.EXPECT().IsManual().Return(false, nil)
	mocked.facadeAPI.addMachine(machineTag, nonManualMachine)

	// Queue machine.
	err := updWorker.queueMachineForPolling(machineTag)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 1, gc.Commentf("machine didn't end up in short poll group"))
}

func (s *workerSuite) TestQueueingExistingMachineAlwaysMovesItToShortPollGroup(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, _ := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)
	machine.EXPECT().Refresh().Return(nil)
	machine.EXPECT().Life().Return(life.Alive)
	updWorker.appendToShortPollGroup(machineTag, machine)

	// Manually move entry to long poll group.
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	entry.shortPollInterval = LongPoll
	updWorker.pollGroup[longPollGroup][machineTag] = entry
	delete(updWorker.pollGroup[shortPollGroup], machineTag)

	// Queue machine.
	err := updWorker.queueMachineForPolling(machineTag)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 1, gc.Commentf("machine didn't end up in short poll group"))
	c.Assert(entry.shortPollInterval, gc.Equals, ShortPoll, gc.Commentf("poll interval was not reset"))
}

func (s *workerSuite) TestUpdateOfStatusAndAddressDetails(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, _ := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	// Start with an entry for machine "0"
	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)
	entry := &pollGroupEntry{
		tag:        machineTag,
		m:          machine,
		instanceID: "b4dc0ffee",
	}

	// The machine is alive, has an instance status of "provisioning" and
	// is aware of its network addresses.
	machine.EXPECT().Id().Return("0").AnyTimes()
	machine.EXPECT().Life().Return(life.Alive)
	machine.EXPECT().InstanceStatus().Return(params.StatusResult{Status: string(status.Provisioning)}, nil)

	// The provider reports the instance status as running and also indicates
	// that network addresses have been *changed*.
	instInfo := mocks.NewMockInstance(ctrl)
	instInfo.EXPECT().Status(gomock.Any()).Return(instance.Status{Status: status.Running, Message: "Running wild"})

	// When we process the instance info we expect the machine instance
	// status and list of network addresses to be updated so they match
	// the values reported by the provider.
	machine.EXPECT().SetInstanceStatus(status.Running, "Running wild", nil).Return(nil)
	machine.EXPECT().SetProviderNetworkConfig(testNetIfs).Return(testAddrs, true, nil)

	providerStatus, addrCount, err := updWorker.processProviderInfo(entry, instInfo, testNetIfs)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(providerStatus, gc.Equals, status.Running)
	c.Assert(addrCount, gc.Equals, len(testAddrs))
}

func (s *workerSuite) TestStartedMachineWithNetAddressesMovesToLongPollGroup(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, _ := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	// Start with machine "0" in the short poll group.
	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)
	updWorker.appendToShortPollGroup(machineTag, machine)
	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 1)

	// The provider reports an instance status of "running"; the machine
	// reports its machine status as "started".
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	updWorker.maybeSwitchPollGroup(shortPollGroup, entry, status.Running, status.Started, 1)

	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 0)
	c.Assert(updWorker.pollGroup[longPollGroup], gc.HasLen, 1)
}

func (s *workerSuite) TestNonStartedMachinesGetBumpedPollInterval(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, _ := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machine := mocks.NewMockMachine(ctrl)

	specs := []status.Status{status.Allocating, status.Pending}
	for specIndex, spec := range specs {
		c.Logf("provider reports instance status as: %q", spec)
		machineTag := names.NewMachineTag(fmt.Sprint(specIndex))
		updWorker.appendToShortPollGroup(machineTag, machine)
		entry, _ := updWorker.lookupPolledMachine(machineTag)

		updWorker.maybeSwitchPollGroup(shortPollGroup, entry, spec, status.Pending, 0)
		c.Assert(entry.shortPollInterval, gc.Equals, time.Duration(float64(ShortPoll)*ShortPollBackoff))
	}
}

func (s *workerSuite) TestMoveMachineWithUnknownStatusBackToShortPollGroup(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, _ := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	// The machine is assigned a network address.
	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	// Move the machine to the long poll group.
	updWorker.appendToShortPollGroup(machineTag, machine)
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	updWorker.maybeSwitchPollGroup(shortPollGroup, entry, status.Running, status.Started, 1)
	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 0)
	c.Assert(updWorker.pollGroup[longPollGroup], gc.HasLen, 1)

	// If we get unknown status from the provider we expect the machine to
	// be moved back to the short poll group.
	updWorker.maybeSwitchPollGroup(longPollGroup, entry, status.Unknown, status.Started, 1)
	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 1)
	c.Assert(updWorker.pollGroup[longPollGroup], gc.HasLen, 0)
	c.Assert(entry.shortPollInterval, gc.Equals, ShortPoll)
}

func (s *workerSuite) TestSkipMachineIfShortPollTargetTimeNotElapsed(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	// Add machine to short poll group and bump its poll interval
	updWorker.appendToShortPollGroup(machineTag, machine)
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	entry.bumpShortPollInterval(mocked.clock)
	pollAt := entry.shortPollAt

	// Advance the clock to trigger processing of the short poll groups
	// but not far enough to process the entry with the bumped interval.
	s.assertWorkerCompletesLoop(c, updWorker, func() {
		mocked.clock.Advance(ShortPoll)
	})

	c.Assert(pollAt, gc.Equals, entry.shortPollAt, gc.Commentf("machine shouldn't have been polled"))
}

func (s *workerSuite) TestDeadMachineGetsRemoved(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	// Add machine to short poll group
	updWorker.appendToShortPollGroup(machineTag, machine)
	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 1)

	// On next refresh, the machine reports as dead
	machine.EXPECT().Refresh().Return(nil)
	machine.EXPECT().Life().Return(life.Dead)

	// Emit a change for the machine so the queueing code detects the
	// dead machine and removes it.
	s.assertWorkerCompletesLoop(c, updWorker, func() {
		mocked.facadeAPI.assertEnqueueChange(c, []string{"0"})
	})

	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 0, gc.Commentf("dead machine has not been removed"))
}

func (s *workerSuite) TestReapedMachineIsTreatedAsDeadAndRemoved(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	// Add machine to short poll group
	updWorker.appendToShortPollGroup(machineTag, machine)
	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 1)

	// On the next refresh, the machine refresh fails with NotFound.
	machine.EXPECT().Refresh().Return(
		errors.NotFoundf("this is not the machine you are looking for"),
	)

	// Emit a change for the machine so the queueing code treats the
	// missing machine as dead and removes it.
	s.assertWorkerCompletesLoop(c, updWorker, func() {
		mocked.facadeAPI.assertEnqueueChange(c, []string{"0"})
	})

	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 0, gc.Commentf("dead machine has not been removed"))
}

func (s *workerSuite) TestQueuingOfManualMachines(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	// Add two manual machines, one with a "provisioning" instance status
	// and one with a "running" instance status. We expect the former to
	// have its instance status changed to "running".
	machineTag0 := names.NewMachineTag("0")
	machine0 := mocks.NewMockMachine(ctrl)
	machine0.EXPECT().IsManual().Return(true, nil)
	machine0.EXPECT().InstanceStatus().Return(params.StatusResult{Status: string(status.Provisioning)}, nil)
	machine0.EXPECT().SetInstanceStatus(status.Running, "Manually provisioned machine", nil).Return(nil)
	mocked.facadeAPI.addMachine(machineTag0, machine0)

	machineTag1 := names.NewMachineTag("1")
	machine1 := mocks.NewMockMachine(ctrl)
	machine1.EXPECT().IsManual().Return(true, nil)
	machine1.EXPECT().InstanceStatus().Return(params.StatusResult{Status: string(status.Running)}, nil)
	mocked.facadeAPI.addMachine(machineTag1, machine1)

	// Emit change for both machines.
	s.assertWorkerCompletesLoop(c, updWorker, func() {
		mocked.facadeAPI.assertEnqueueChange(c, []string{"0", "1"})
	})

	// None of the machines should have been added.
	c.Assert(updWorker.pollGroup[shortPollGroup], gc.HasLen, 0)
	c.Assert(updWorker.pollGroup[longPollGroup], gc.HasLen, 0)
}

func (s *workerSuite) TestBatchPollingOfGroupMembers(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	// Add two machines, one that is not yet provisioned and one that has
	// a "started" machine status and a "running" instance status.
	machineTag0 := names.NewMachineTag("0")
	machine0 := mocks.NewMockMachine(ctrl)
	machine0.EXPECT().InstanceId().Return(instance.Id(""), apiservererrors.ServerError(errors.NotProvisionedf("not there")))
	machine0.EXPECT().Id().Return("0")
	updWorker.appendToShortPollGroup(machineTag0, machine0)

	machineTag1 := names.NewMachineTag("1")
	machine1 := mocks.NewMockMachine(ctrl)
	machine1.EXPECT().Life().Return(life.Alive)
	machine1.EXPECT().InstanceId().Return(instance.Id("b4dc0ffee"), nil)
	machine1.EXPECT().InstanceStatus().Return(params.StatusResult{Status: string(status.Running)}, nil)
	machine1.EXPECT().Status().Return(params.StatusResult{Status: string(status.Started)}, nil)
	machine1.EXPECT().SetProviderNetworkConfig(testNetIfs).Return(testAddrs, false, nil)
	updWorker.appendToShortPollGroup(machineTag1, machine1)

	machine1Info := mocks.NewMockInstance(ctrl)
	machine1Info.EXPECT().Status(gomock.Any()).Return(instance.Status{Status: status.Running})
	mocked.environ.EXPECT().Instances(gomock.Any(), []instance.Id{"b4dc0ffee"}).Return([]instances.Instance{machine1Info}, nil)
	mocked.environ.EXPECT().NetworkInterfaces(gomock.Any(), []instance.Id{"b4dc0ffee"}).Return(
		[]network.InterfaceInfos{testNetIfs},
		nil,
	)

	// Trigger a poll of the short poll group and wait for the worker loop
	// to complete.
	s.assertWorkerCompletesLoop(c, updWorker, func() {
		mocked.clock.Advance(ShortPoll)
	})
}

func (s *workerSuite) TestLongPollMachineNotKnownByProvider(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	// Add machine to short poll group and manually move it to long poll group.
	updWorker.appendToShortPollGroup(machineTag, machine)
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	updWorker.pollGroup[longPollGroup][machineTag] = entry
	delete(updWorker.pollGroup[shortPollGroup], machineTag)

	// Allow instance ID to be resolved but have the provider's Instances
	// call fail with a partial instance list.
	instID := instance.Id("d3adc0de")
	machine.EXPECT().InstanceId().Return(instID, nil)
	mocked.environ.EXPECT().Instances(gomock.Any(), []instance.Id{instID}).Return(
		[]instances.Instance{}, environs.ErrPartialInstances,
	)
	mocked.environ.EXPECT().NetworkInterfaces(gomock.Any(), []instance.Id{instID}).Return(
		nil, nil,
	)

	// Advance the clock to trigger processing of both the short AND long
	// poll groups. This should trigger two full loop runs.
	s.assertWorkerCompletesLoops(c, updWorker, 2, func() {
		mocked.clock.Advance(LongPoll)
	})
}

func (s *workerSuite) TestShortPollMachineNotKnownByProviderIntervalBackoff(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	updWorker.appendToShortPollGroup(machineTag, machine)

	// Allow instance ID to be resolved but have the provider's Instances
	// call fail with a partial instance list.
	instID := instance.Id("d3adc0de")
	machine.EXPECT().InstanceId().Return(instID, nil)
	mocked.environ.EXPECT().Instances(gomock.Any(), []instance.Id{instID}).Return(
		[]instances.Instance{nil}, environs.ErrPartialInstances,
	)
	mocked.environ.EXPECT().NetworkInterfaces(gomock.Any(), []instance.Id{instID}).Return(
		nil, nil,
	)

	// Advance the clock to trigger processing of the short poll group.
	s.assertWorkerCompletesLoops(c, updWorker, 1, func() {
		mocked.clock.Advance(ShortPoll)
	})

	// Check that we have backed off the poll interval.
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	c.Assert(entry.shortPollInterval, gc.Equals, time.Duration(float64(ShortPoll)*ShortPollBackoff))
}

func (s *workerSuite) TestLongPollNoMachineInGroupKnownByProvider(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	// Add machine to short poll group and manually move it to long poll group.
	updWorker.appendToShortPollGroup(machineTag, machine)
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	updWorker.pollGroup[longPollGroup][machineTag] = entry
	delete(updWorker.pollGroup[shortPollGroup], machineTag)

	// Allow instance ID to be resolved but have the provider's Instances
	// call fail with ErrNoInstances. This is probably rare but can happen
	// and shouldn't cause the worker to exit with an error!
	instID := instance.Id("d3adc0de")
	machine.EXPECT().InstanceId().Return(instID, nil)
	mocked.environ.EXPECT().Instances(gomock.Any(), []instance.Id{instID}).Return(
		nil, environs.ErrNoInstances,
	)

	// Advance the clock to trigger processing of both the short AND long
	// poll groups. This should trigger two full loop runs.
	s.assertWorkerCompletesLoops(c, updWorker, 2, func() {
		mocked.clock.Advance(LongPoll)
	})
}

func (s *workerSuite) TestShortPollNoMachineInGroupKnownByProviderIntervalBackoff(c *gc.C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()

	w, mocked := s.startWorker(c, ctrl)
	defer workertest.CleanKill(c, w)
	updWorker := w.(*updaterWorker)

	machineTag := names.NewMachineTag("0")
	machine := mocks.NewMockMachine(ctrl)

	// Add machine to short poll group.
	updWorker.appendToShortPollGroup(machineTag, machine)

	// Allow instance ID to be resolved but have the provider's Instances
	// call fail with ErrNoInstances. This is probably rare but can happen
	// and shouldn't cause the worker to exit with an error!
	instID := instance.Id("d3adc0de")
	machine.EXPECT().InstanceId().Return(instID, nil)
	mocked.environ.EXPECT().Instances(gomock.Any(), []instance.Id{instID}).Return(
		nil, environs.ErrNoInstances,
	)

	// Advance the clock to trigger processing of the short poll group.
	s.assertWorkerCompletesLoops(c, updWorker, 1, func() {
		mocked.clock.Advance(ShortPoll)
	})

	// Check that we have backed off the poll interval.
	entry, _ := updWorker.lookupPolledMachine(machineTag)
	c.Assert(entry.shortPollInterval, gc.Equals, time.Duration(float64(ShortPoll)*ShortPollBackoff))
}

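// assertWorkerCompletesLoop waits for the worker to complete a single main
// loop iteration after triggerFn fires.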
func (s *workerSuite) assertWorkerCompletesLoop(c *gc.C, w *updaterWorker, triggerFn func()) {
	s.assertWorkerCompletesLoops(c, w, 1, triggerFn)
}

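// assertWorkerCompletesLoops installs a loopCompletedHook on the worker,
// invokes triggerFn and then waits for numLoops loop iterations to complete,
// failing the test if any of them takes longer than coretesting.ShortWait.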
func (s *workerSuite) assertWorkerCompletesLoops(c *gc.C, w *updaterWorker, numLoops int, triggerFn func()) {
	ch := make(chan struct{})
	defer func() { w.loopCompletedHook = nil }()

	w.loopCompletedHook = func() { ch <- struct{}{} }
	triggerFn()

	for loop := 0; loop < numLoops; loop++ {
		select {
		case <-ch: // loop completed
		case <-time.After(coretesting.ShortWait):
			c.Fatal("timed out waiting for instance poller to complete a full loop")
		}
	}
}

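// workerMocks bundles the mocked dependencies (clock, facade and environ)
// that startWorker wires into a new updaterWorker so tests can drive them.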
type workerMocks struct {
	clock     *testclock.Clock
	facadeAPI *mockFacadeAPI
	environ   *mocks.MockEnviron
}

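// startWorker creates an updaterWorker backed by workerMocks and blocks until
// the worker has entered its main loop, so tests can safely advance the test
// clock or enqueue watcher changes.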
func (s *workerSuite) startWorker(c *gc.C, ctrl *gomock.Controller) (worker.Worker, workerMocks) {
	workerMainLoopEnteredCh := make(chan struct{}, 1)
	mocked := workerMocks{
		clock:     testclock.NewClock(time.Now()),
		facadeAPI: newMockFacadeAPI(ctrl, workerMainLoopEnteredCh),
		environ:   mocks.NewMockEnviron(ctrl),
	}

	w, err := NewWorker(Config{
		Clock:         mocked.clock,
		Facade:        mocked.facadeAPI,
		Environ:       mocked.environ,
		CredentialAPI: mocks.NewMockCredentialAPI(ctrl),
		Logger:        loggo.GetLogger("juju.worker.instancepoller"),
	})
	c.Assert(err, jc.ErrorIsNil)

	// Wait for worker to reach main loop before we allow tests to
	// manipulate the clock.
	select {
	case <-workerMainLoopEnteredCh:
	case <-time.After(coretesting.ShortWait):
		c.Fatal("timed out waiting for worker to enter main loop")
	}

	return w, mocked
}

// mockFacadeAPI is a workaround for not being able to use gomock for the
// FacadeAPI interface. Because the Machine() method returns a Machine interface,
// gomock will import instancepoller and cause an import cycle.
type mockFacadeAPI struct {
	machineMap map[names.MachineTag]Machine

	sw              *mocks.MockStringsWatcher
	watcherChangeCh chan []string
}

func newMockFacadeAPI(ctrl *gomock.Controller, workerGotWatcherCh chan<- struct{}) *mockFacadeAPI {
	api := &mockFacadeAPI{
		machineMap:      make(map[names.MachineTag]Machine),
		sw:              mocks.NewMockStringsWatcher(ctrl),
		watcherChangeCh: make(chan []string),
	}

	api.sw.EXPECT().Changes().DoAndReturn(func() watcher.StringsChannel {
		select {
		case workerGotWatcherCh <- struct{}{}:
		default:
		}
		return api.watcherChangeCh
	}).AnyTimes()
	api.sw.EXPECT().Kill().AnyTimes()
	api.sw.EXPECT().Wait().AnyTimes()
	return api
}

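// assertEnqueueChange delivers a machine-change event to the worker's watcher
// channel, failing the test if the worker does not pick it up in time.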
func (api *mockFacadeAPI) assertEnqueueChange(c *gc.C, values []string) {
	select {
	case api.watcherChangeCh <- values:
	case <-time.After(coretesting.ShortWait):
		c.Fatal("timed out waiting for worker to pick up change")
	}
}
func (api *mockFacadeAPI) addMachine(tag names.MachineTag, m Machine) { api.machineMap[tag] = m }

func (api *mockFacadeAPI) WatchModelMachines() (watcher.StringsWatcher, error) { return api.sw, nil }
func (api *mockFacadeAPI) Machine(tag names.MachineTag) (Machine, error) {
	if found := api.machineMap[tag]; found != nil {
		return found, nil
	}
	return nil, errors.NotFoundf(tag.String())
}
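
// Compile-time check (an editor-added, hedged sketch): the worker consumes
// this mock through Config.Facade, and the comment above names that interface
// FacadeAPI, so the assertion below should hold; it assumes the interface is
// indeed called FacadeAPI in this package.
var _ FacadeAPI = (*mockFacadeAPI)(nil)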