github.com/makyo/juju@v0.0.0-20160425123129-2608902037e9/worker/upgradesteps/worker_test.go (about)

     1  // Copyright 2015 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package upgradesteps
     5  
     6  import (
     7  	"fmt"
     8  	"time"
     9  
    10  	"github.com/juju/errors"
    11  	"github.com/juju/loggo"
    12  	"github.com/juju/names"
    13  	jc "github.com/juju/testing/checkers"
    14  	"github.com/juju/utils"
    15  	"github.com/juju/utils/arch"
    16  	"github.com/juju/utils/series"
    17  	gc "gopkg.in/check.v1"
    18  
    19  	"github.com/juju/juju/agent"
    20  	cmdutil "github.com/juju/juju/cmd/jujud/util"
    21  	"github.com/juju/juju/constraints"
    22  	"github.com/juju/juju/environs"
    23  	"github.com/juju/juju/instance"
    24  	"github.com/juju/juju/mongo"
    25  	"github.com/juju/juju/state"
    26  	"github.com/juju/juju/state/multiwatcher"
    27  	statetesting "github.com/juju/juju/state/testing"
    28  	"github.com/juju/juju/status"
    29  	coretesting "github.com/juju/juju/testing"
    30  	"github.com/juju/juju/testing/factory"
    31  	"github.com/juju/juju/upgrades"
    32  	jujuversion "github.com/juju/juju/version"
    33  	"github.com/juju/juju/worker/gate"
    34  	"github.com/juju/version"
    35  )
    36  
    37  // TODO(mjs) - these tests are too tightly coupled to the
    38  // implementation. They needn't be internal tests.
    39  
// UpgradeSuite exercises the upgradesteps worker against real state
// (via statetesting.StateSuite), with key collaborators patched so
// tests can control master-ness, connection liveness and failures.
type UpgradeSuite struct {
	statetesting.StateSuite

	oldVersion      version.Binary   // the version the agent is "upgrading from"
	logWriter       loggo.TestWriter // collects log output (see captureLogs)
	connectionDead  bool             // reported by the patched cmdutil.ConnectionIsDead
	machineIsMaster bool             // reported by the patched IsMachineMaster
	preUpgradeError bool             // makes preUpgradeSteps fail when true
}
    49  
var _ = gc.Suite(&UpgradeSuite{})

// Readable aliases for the expectFail argument of the
// makeExpectedStatusCalls/makeExpectedUpgradeLogs helpers.
const fails = true
const succeeds = false
    54  
    55  func (s *UpgradeSuite) SetUpTest(c *gc.C) {
    56  	s.StateSuite.SetUpTest(c)
    57  
    58  	s.preUpgradeError = false
    59  	// Most of these tests normally finish sub-second on a fast machine.
    60  	// If any given test hits a minute, we have almost certainly become
    61  	// wedged, so dump the logs.
    62  	coretesting.DumpTestLogsAfter(time.Minute, c, s)
    63  
    64  	s.oldVersion = version.Binary{
    65  		Number: jujuversion.Current,
    66  		Arch:   arch.HostArch(),
    67  		Series: series.HostSeries(),
    68  	}
    69  	s.oldVersion.Major = 1
    70  	s.oldVersion.Minor = 16
    71  
    72  	// Don't wait so long in tests.
    73  	s.PatchValue(&UpgradeStartTimeoutMaster, time.Duration(time.Millisecond*50))
    74  	s.PatchValue(&UpgradeStartTimeoutSecondary, time.Duration(time.Millisecond*60))
    75  
    76  	// Allow tests to make the API connection appear to be dead.
    77  	s.connectionDead = false
    78  	s.PatchValue(&cmdutil.ConnectionIsDead, func(loggo.Logger, cmdutil.Pinger) bool {
    79  		return s.connectionDead
    80  	})
    81  
    82  	s.machineIsMaster = true
    83  	fakeIsMachineMaster := func(*state.State, string) (bool, error) {
    84  		return s.machineIsMaster, nil
    85  	}
    86  	s.PatchValue(&IsMachineMaster, fakeIsMachineMaster)
    87  
    88  }
    89  
    90  func (s *UpgradeSuite) captureLogs(c *gc.C) {
    91  	c.Assert(loggo.RegisterWriter("upgrade-tests", &s.logWriter, loggo.INFO), gc.IsNil)
    92  	s.AddCleanup(func(*gc.C) {
    93  		loggo.RemoveWriter("upgrade-tests")
    94  		s.logWriter.Clear()
    95  	})
    96  }
    97  
    98  func (s *UpgradeSuite) countUpgradeAttempts(upgradeErr error) *int {
    99  	count := 0
   100  	s.PatchValue(&PerformUpgrade, func(version.Number, []upgrades.Target, upgrades.Context) error {
   101  		count++
   102  		return upgradeErr
   103  	})
   104  	return &count
   105  }
   106  
   107  func (s *UpgradeSuite) TestNewChannelWhenNoUpgradeRequired(c *gc.C) {
   108  	// Set the agent's initial upgradedToVersion to almost the same as
   109  	// the current version. We want it to be different to
   110  	// jujuversion.Current (so that we can see it change) but not to
   111  	// trigger upgrade steps.
   112  	config := NewFakeConfigSetter(names.NewMachineTag("0"), makeBumpedCurrentVersion().Number)
   113  	agent := NewFakeAgent(config)
   114  
   115  	lock, err := NewLock(agent)
   116  	c.Assert(err, jc.ErrorIsNil)
   117  
   118  	c.Assert(lock.IsUnlocked(), jc.IsTrue)
   119  	// The agent's version should have been updated.
   120  	c.Assert(config.Version, gc.Equals, jujuversion.Current)
   121  
   122  }
   123  
   124  func (s *UpgradeSuite) TestNewChannelWhenUpgradeRequired(c *gc.C) {
   125  	// Set the agent's upgradedToVersion so that upgrade steps are required.
   126  	initialVersion := version.MustParse("1.16.0")
   127  	config := NewFakeConfigSetter(names.NewMachineTag("0"), initialVersion)
   128  	agent := NewFakeAgent(config)
   129  
   130  	lock, err := NewLock(agent)
   131  	c.Assert(err, jc.ErrorIsNil)
   132  
   133  	c.Assert(lock.IsUnlocked(), jc.IsFalse)
   134  	// The agent's version should NOT have been updated.
   135  	c.Assert(config.Version, gc.Equals, initialVersion)
   136  }
   137  
   138  func (s *UpgradeSuite) TestRetryStrategy(c *gc.C) {
   139  	retries := getUpgradeRetryStrategy()
   140  	c.Assert(retries.Delay, gc.Equals, 2*time.Minute)
   141  	c.Assert(retries.Min, gc.Equals, 5)
   142  }
   143  
   144  func (s *UpgradeSuite) TestNoUpgradeNecessary(c *gc.C) {
   145  	attemptsP := s.countUpgradeAttempts(nil)
   146  	s.captureLogs(c)
   147  	s.oldVersion.Number = jujuversion.Current // nothing to do
   148  
   149  	workerErr, config, _, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)
   150  
   151  	c.Check(workerErr, gc.IsNil)
   152  	c.Check(*attemptsP, gc.Equals, 0)
   153  	c.Check(config.Version, gc.Equals, jujuversion.Current)
   154  	c.Check(doneLock.IsUnlocked(), jc.IsTrue)
   155  }
   156  
func (s *UpgradeSuite) TestUpgradeStepsFailure(c *gc.C) {
	// This test checks what happens when every upgrade attempt fails.
	// A number of retries should be observed and the agent should end
	// up in a state where it is still running but is reporting an
	// error and the upgrade is not flagged as having completed (which
	// prevents most of the agent's workers from running and keeps the
	// API in restricted mode).

	attemptsP := s.countUpgradeAttempts(errors.New("boom"))
	s.captureLogs(c)

	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)

	// The worker shouldn't return an error so that the worker and
	// agent keep running.
	c.Check(workerErr, gc.IsNil)

	// Every one of the (instant) retries should have been consumed.
	c.Check(*attemptsP, gc.Equals, maxUpgradeRetries)
	c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't finish
	// maxUpgradeRetries-1 "will retry" entries, then a final "giving up".
	c.Assert(statusCalls, jc.DeepEquals,
		s.makeExpectedStatusCalls(maxUpgradeRetries-1, fails, "boom"))
	c.Assert(s.logWriter.Log(), jc.LogMatches,
		s.makeExpectedUpgradeLogs(maxUpgradeRetries-1, "hostMachine", fails, "boom"))
	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)
}
   182  
   183  func (s *UpgradeSuite) TestUpgradeStepsRetries(c *gc.C) {
   184  	// This test checks what happens when the first upgrade attempt
   185  	// fails but the following on succeeds. The final state should be
   186  	// the same as a successful upgrade which worked first go.
   187  	attempts := 0
   188  	fail := true
   189  	fakePerformUpgrade := func(version.Number, []upgrades.Target, upgrades.Context) error {
   190  		attempts++
   191  		if fail {
   192  			fail = false
   193  			return errors.New("boom")
   194  		} else {
   195  			return nil
   196  		}
   197  	}
   198  	s.PatchValue(&PerformUpgrade, fakePerformUpgrade)
   199  	s.captureLogs(c)
   200  
   201  	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)
   202  
   203  	c.Check(workerErr, gc.IsNil)
   204  	c.Check(attempts, gc.Equals, 2)
   205  	c.Check(config.Version, gc.Equals, jujuversion.Current) // Upgrade finished
   206  	c.Assert(statusCalls, jc.DeepEquals, s.makeExpectedStatusCalls(1, succeeds, "boom"))
   207  	c.Assert(s.logWriter.Log(), jc.LogMatches, s.makeExpectedUpgradeLogs(1, "hostMachine", succeeds, "boom"))
   208  	c.Check(doneLock.IsUnlocked(), jc.IsTrue)
   209  }
   210  
func (s *UpgradeSuite) TestOtherUpgradeRunFailure(c *gc.C) {
	// This test checks what happens when something other than the
	// upgrade steps themselves fails, ensuring the something is
	// logged and the agent status is updated.

	fakePerformUpgrade := func(version.Number, []upgrades.Target, upgrades.Context) error {
		// Delete UpgradeInfo for the upgrade so that finaliseUpgrade() will fail.
		// NOTE(review): any error from ClearUpgradeInfo is deliberately
		// ignored here — this is best-effort sabotage for the test.
		s.State.ClearUpgradeInfo()
		return nil
	}
	s.PatchValue(&PerformUpgrade, fakePerformUpgrade)
	// A controller machine is needed for the JobManageModel run below.
	s.Factory.MakeMachine(c, &factory.MachineParams{
		Jobs: []state.MachineJob{state.JobManageModel},
	})
	s.captureLogs(c)

	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobManageModel)

	c.Check(workerErr, gc.IsNil)
	c.Check(config.Version, gc.Equals, jujuversion.Current) // Upgrade almost finished
	failReason := `upgrade done but: cannot set upgrade status to "finishing": ` +
		`Another status change may have occurred concurrently`
	// The steps themselves succeeded (retryCount 0) but finalisation failed.
	c.Assert(statusCalls, jc.DeepEquals,
		s.makeExpectedStatusCalls(0, fails, failReason))
	c.Assert(s.logWriter.Log(), jc.LogMatches,
		s.makeExpectedUpgradeLogs(0, "databaseMaster", fails, failReason))
	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)
}
   239  
   240  func (s *UpgradeSuite) TestApiConnectionFailure(c *gc.C) {
   241  	// This test checks what happens when an upgrade fails because the
   242  	// connection to mongo has gone away. This will happen when the
   243  	// mongo master changes. In this case we want the upgrade worker
   244  	// to return immediately without further retries. The error should
   245  	// be returned by the worker so that the agent will restart.
   246  
   247  	attemptsP := s.countUpgradeAttempts(errors.New("boom"))
   248  	s.connectionDead = true // Make the connection to state appear to be dead
   249  	s.captureLogs(c)
   250  
   251  	workerErr, config, _, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)
   252  
   253  	c.Check(workerErr, gc.ErrorMatches, "API connection lost during upgrade: boom")
   254  	c.Check(*attemptsP, gc.Equals, 1)
   255  	c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't finish
   256  	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)
   257  }
   258  
func (s *UpgradeSuite) TestAbortWhenOtherControllerDoesntStartUpgrade(c *gc.C) {
	// This test checks when a controller is upgrading and one of
	// the other controllers doesn't signal it is ready in time.

	err := s.State.SetModelAgentVersion(jujuversion.Current)
	c.Assert(err, jc.ErrorIsNil)

	// The master controller in this scenario is functionally tested
	// elsewhere.
	s.machineIsMaster = false

	s.create3Controllers(c)
	s.captureLogs(c)
	attemptsP := s.countUpgradeAttempts(nil)

	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobManageModel)

	// The worker exits cleanly, but no upgrade step was ever attempted
	// because the wait for the other controllers timed out.
	c.Check(workerErr, gc.IsNil)
	c.Check(*attemptsP, gc.Equals, 0)
	c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't happen
	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)

	// The environment agent-version should still be the new version.
	// It's up to the master to trigger the rollback.
	s.assertEnvironAgentVersion(c, jujuversion.Current)

	// 60ms is UpgradeStartTimeoutSecondary as patched in SetUpTest.
	// Note the leading space: causeMsg is appended directly after a colon.
	causeMsg := " timed out after 60ms"
	c.Assert(s.logWriter.Log(), jc.LogMatches, []jc.SimpleMessage{
		{loggo.INFO, "waiting for other controllers to be ready for upgrade"},
		{loggo.ERROR, "aborted wait for other controllers: timed out after 60ms"},
		{loggo.ERROR, `upgrade from .+ to .+ for "machine-0" failed \(giving up\): ` +
			"aborted wait for other controllers:" + causeMsg},
	})
	c.Assert(statusCalls, jc.DeepEquals, []StatusCall{{
		status.StatusError,
		fmt.Sprintf(
			"upgrade to %s failed (giving up): aborted wait for other controllers:"+causeMsg,
			jujuversion.Current),
	}})
}
   299  
   300  func (s *UpgradeSuite) TestSuccessMaster(c *gc.C) {
   301  	// This test checks what happens when an upgrade works on the
   302  	// first attempt on a master controller.
   303  	s.machineIsMaster = true
   304  	info := s.checkSuccess(c, "databaseMaster", func(*state.UpgradeInfo) {})
   305  	c.Assert(info.Status(), gc.Equals, state.UpgradeFinishing)
   306  }
   307  
   308  func (s *UpgradeSuite) TestSuccessSecondary(c *gc.C) {
   309  	// This test checks what happens when an upgrade works on the
   310  	// first attempt on a secondary controller.
   311  	s.machineIsMaster = false
   312  	mungeInfo := func(info *state.UpgradeInfo) {
   313  		// Indicate that the master is done
   314  		err := info.SetStatus(state.UpgradeRunning)
   315  		c.Assert(err, jc.ErrorIsNil)
   316  		err = info.SetStatus(state.UpgradeFinishing)
   317  		c.Assert(err, jc.ErrorIsNil)
   318  	}
   319  	s.checkSuccess(c, "controller", mungeInfo)
   320  }
   321  
// checkSuccess runs the upgrade worker for a JobManageModel machine in
// a 3-controller HA setup and asserts a clean first-attempt upgrade.
// mungeInfo lets callers adjust the UpgradeInfo document (e.g. to mark
// the master as finished) before the worker runs. The refreshed
// UpgradeInfo is returned for further assertions.
func (s *UpgradeSuite) checkSuccess(c *gc.C, target string, mungeInfo func(*state.UpgradeInfo)) *state.UpgradeInfo {
	_, machineIdB, machineIdC := s.create3Controllers(c)

	// Indicate that machine B and C are ready to upgrade
	vPrevious := s.oldVersion.Number
	vNext := jujuversion.Current
	info, err := s.State.EnsureUpgradeInfo(machineIdB, vPrevious, vNext)
	c.Assert(err, jc.ErrorIsNil)
	_, err = s.State.EnsureUpgradeInfo(machineIdC, vPrevious, vNext)
	c.Assert(err, jc.ErrorIsNil)

	mungeInfo(info)

	attemptsP := s.countUpgradeAttempts(nil)
	s.captureLogs(c)

	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobManageModel)

	c.Check(workerErr, gc.IsNil)
	c.Check(*attemptsP, gc.Equals, 1)
	c.Check(config.Version, gc.Equals, jujuversion.Current) // Upgrade finished
	c.Assert(statusCalls, jc.DeepEquals, s.makeExpectedStatusCalls(0, succeeds, ""))
	c.Assert(s.logWriter.Log(), jc.LogMatches, s.makeExpectedUpgradeLogs(0, target, succeeds, ""))
	c.Check(doneLock.IsUnlocked(), jc.IsTrue)

	// Machine 0 (this agent) should now be recorded as done.
	err = info.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(info.ControllersDone(), jc.DeepEquals, []string{"0"})
	return info
}
   352  
   353  func (s *UpgradeSuite) TestJobsToTargets(c *gc.C) {
   354  	check := func(jobs []multiwatcher.MachineJob, isMaster bool, expectedTargets ...upgrades.Target) {
   355  		c.Assert(jobsToTargets(jobs, isMaster), jc.SameContents, expectedTargets)
   356  	}
   357  
   358  	check([]multiwatcher.MachineJob{multiwatcher.JobHostUnits}, false, upgrades.HostMachine)
   359  	check([]multiwatcher.MachineJob{multiwatcher.JobManageModel}, false, upgrades.Controller)
   360  	check([]multiwatcher.MachineJob{multiwatcher.JobManageModel}, true,
   361  		upgrades.Controller, upgrades.DatabaseMaster)
   362  	check([]multiwatcher.MachineJob{multiwatcher.JobManageModel, multiwatcher.JobHostUnits}, false,
   363  		upgrades.Controller, upgrades.HostMachine)
   364  	check([]multiwatcher.MachineJob{multiwatcher.JobManageModel, multiwatcher.JobHostUnits}, true,
   365  		upgrades.Controller, upgrades.DatabaseMaster, upgrades.HostMachine)
   366  }
   367  
func (s *UpgradeSuite) TestPreUpgradeFail(c *gc.C) {
	// When the pre-upgrade checks fail the worker gives up without
	// retrying and leaves the agent version untouched.
	s.preUpgradeError = true
	s.captureLogs(c)

	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)

	c.Check(workerErr, jc.ErrorIsNil)
	c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't finish
	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)

	// failMessage is a regexp (used with LogMatches), hence the
	// escaped parentheses; statusMessage below is a plain string.
	causeMessage := `machine 0 cannot be upgraded: preupgrade error`
	failMessage := fmt.Sprintf(
		`upgrade from %s to %s for "machine-0" failed \(giving up\): %s`,
		s.oldVersion.Number, jujuversion.Current, causeMessage)
	c.Assert(s.logWriter.Log(), jc.LogMatches, []jc.SimpleMessage{
		{loggo.INFO, "checking that upgrade can proceed"},
		{loggo.ERROR, failMessage},
	})

	statusMessage := fmt.Sprintf(
		`upgrade to %s failed (giving up): %s`, jujuversion.Current, causeMessage)
	c.Assert(statusCalls, jc.DeepEquals, []StatusCall{{
		status.StatusError, statusMessage,
	}})
}
   393  
// runUpgradeWorker runs just the upgradesteps worker with a fake
// machine agent and fake agent config, waits for it to finish, and
// returns its result along with the config, recorded status calls and
// the completion lock.
//
// NOTE(review): the error is (unconventionally) the first return
// value; callers throughout this file rely on that order.
func (s *UpgradeSuite) runUpgradeWorker(c *gc.C, jobs ...multiwatcher.MachineJob) (
	error, *fakeConfigSetter, []StatusCall, gate.Lock,
) {
	s.setInstantRetryStrategy(c)
	config := s.makeFakeConfig()
	agent := NewFakeAgent(config)
	doneLock, err := NewLock(agent)
	c.Assert(err, jc.ErrorIsNil)
	machineStatus := &testStatusSetter{}
	worker, err := NewWorker(doneLock, agent, nil, jobs, s.openStateForUpgrade, s.preUpgradeSteps, machineStatus)
	c.Assert(err, jc.ErrorIsNil)
	// Wait() blocks until the worker completes, so the returned values
	// reflect the finished run.
	return worker.Wait(), config, machineStatus.Calls, doneLock
}
   409  
   410  func (s *UpgradeSuite) openStateForUpgrade() (*state.State, error) {
   411  	mongoInfo := s.State.MongoConnectionInfo()
   412  	st, err := state.Open(s.State.ModelTag(), mongoInfo, mongo.DefaultDialOpts(), environs.NewStatePolicy())
   413  	if err != nil {
   414  		return nil, err
   415  	}
   416  	return st, nil
   417  }
   418  
   419  func (s *UpgradeSuite) preUpgradeSteps(st *state.State, agentConf agent.Config, isController, isMasterController bool) error {
   420  	if s.preUpgradeError {
   421  		return errors.New("preupgrade error")
   422  	}
   423  	return nil
   424  }
   425  
// makeFakeConfig returns agent config for machine-0 whose
// upgradedToVersion is the suite's old version.
func (s *UpgradeSuite) makeFakeConfig() *fakeConfigSetter {
	return NewFakeConfigSetter(names.NewMachineTag("0"), s.oldVersion.Number)
}
   429  
   430  func (s *UpgradeSuite) create3Controllers(c *gc.C) (machineIdA, machineIdB, machineIdC string) {
   431  	machine0 := s.Factory.MakeMachine(c, &factory.MachineParams{
   432  		Jobs: []state.MachineJob{state.JobManageModel},
   433  	})
   434  	machineIdA = machine0.Id()
   435  	s.setMachineAlive(c, machineIdA)
   436  
   437  	changes, err := s.State.EnableHA(3, constraints.Value{}, "quantal", nil)
   438  	c.Assert(err, jc.ErrorIsNil)
   439  	c.Assert(len(changes.Added), gc.Equals, 2)
   440  
   441  	machineIdB = changes.Added[0]
   442  	s.setMachineProvisioned(c, machineIdB)
   443  	s.setMachineAlive(c, machineIdB)
   444  
   445  	machineIdC = changes.Added[1]
   446  	s.setMachineProvisioned(c, machineIdC)
   447  	s.setMachineAlive(c, machineIdC)
   448  
   449  	return
   450  }
   451  
   452  func (s *UpgradeSuite) setMachineProvisioned(c *gc.C, id string) {
   453  	machine, err := s.State.Machine(id)
   454  	c.Assert(err, jc.ErrorIsNil)
   455  	err = machine.SetProvisioned(instance.Id(id+"-inst"), "nonce", nil)
   456  	c.Assert(err, jc.ErrorIsNil)
   457  }
   458  
   459  func (s *UpgradeSuite) setMachineAlive(c *gc.C, id string) {
   460  	machine, err := s.State.Machine(id)
   461  	c.Assert(err, jc.ErrorIsNil)
   462  	pinger, err := machine.SetAgentPresence()
   463  	c.Assert(err, jc.ErrorIsNil)
   464  	s.AddCleanup(func(c *gc.C) { pinger.Stop() })
   465  }
   466  
   467  // Return a version the same as the current software version, but with
   468  // the build number bumped.
   469  //
   470  // The version Tag is also cleared so that upgrades.PerformUpgrade
   471  // doesn't think it needs to run upgrade steps unnecessarily.
   472  func makeBumpedCurrentVersion() version.Binary {
   473  	v := version.Binary{
   474  		Number: jujuversion.Current,
   475  		Arch:   arch.HostArch(),
   476  		Series: series.HostSeries(),
   477  	}
   478  	v.Build++
   479  	v.Tag = ""
   480  	return v
   481  }
   482  
   483  const maxUpgradeRetries = 3
   484  
   485  func (s *UpgradeSuite) setInstantRetryStrategy(c *gc.C) {
   486  	s.PatchValue(&getUpgradeRetryStrategy, func() utils.AttemptStrategy {
   487  		c.Logf("setting instant retry strategy for upgrade: retries=%d", maxUpgradeRetries)
   488  		return utils.AttemptStrategy{
   489  			Delay: 0,
   490  			Min:   maxUpgradeRetries,
   491  		}
   492  	})
   493  }
   494  
   495  func (s *UpgradeSuite) makeExpectedStatusCalls(retryCount int, expectFail bool, failReason string) []StatusCall {
   496  	calls := []StatusCall{{
   497  		status.StatusStarted,
   498  		fmt.Sprintf("upgrading to %s", jujuversion.Current),
   499  	}}
   500  	for i := 0; i < retryCount; i++ {
   501  		calls = append(calls, StatusCall{
   502  			status.StatusError,
   503  			fmt.Sprintf("upgrade to %s failed (will retry): %s", jujuversion.Current, failReason),
   504  		})
   505  	}
   506  	if expectFail {
   507  		calls = append(calls, StatusCall{
   508  			status.StatusError,
   509  			fmt.Sprintf("upgrade to %s failed (giving up): %s", jujuversion.Current, failReason),
   510  		})
   511  	} else {
   512  		calls = append(calls, StatusCall{status.StatusStarted, ""})
   513  	}
   514  	return calls
   515  }
   516  
// makeExpectedUpgradeLogs builds the log messages (as regexps for
// jc.LogMatches) expected from an upgrade run against the given
// target, mirroring makeExpectedStatusCalls.
func (s *UpgradeSuite) makeExpectedUpgradeLogs(retryCount int, target string, expectFail bool, failReason string) []jc.SimpleMessage {
	outLogs := []jc.SimpleMessage{}

	// Controller targets first wait for the rest of the HA set.
	if target == "databaseMaster" || target == "controller" {
		outLogs = append(outLogs, jc.SimpleMessage{
			loggo.INFO, "waiting for other controllers to be ready for upgrade",
		})
		var waitMsg string
		switch target {
		case "databaseMaster":
			waitMsg = "all controllers are ready to run upgrade steps"
		case "controller":
			waitMsg = "the master has completed its upgrade steps"
		}
		outLogs = append(outLogs, jc.SimpleMessage{loggo.INFO, "finished waiting - " + waitMsg})
	}

	outLogs = append(outLogs, jc.SimpleMessage{
		loggo.INFO, fmt.Sprintf(
			`starting upgrade from %s to %s for "machine-0"`,
			s.oldVersion.Number, jujuversion.Current),
	})

	// %%s survives this Sprintf as a literal %s so failMessage can be
	// formatted again below with "will retry"/"giving up". The \( \)
	// escapes are for the regexp matching done by jc.LogMatches.
	failMessage := fmt.Sprintf(
		`upgrade from %s to %s for "machine-0" failed \(%%s\): %s`,
		s.oldVersion.Number, jujuversion.Current, failReason)

	for i := 0; i < retryCount; i++ {
		outLogs = append(outLogs, jc.SimpleMessage{loggo.ERROR, fmt.Sprintf(failMessage, "will retry")})
	}
	if expectFail {
		outLogs = append(outLogs, jc.SimpleMessage{loggo.ERROR, fmt.Sprintf(failMessage, "giving up")})
	} else {
		outLogs = append(outLogs, jc.SimpleMessage{loggo.INFO,
			fmt.Sprintf(`upgrade to %s completed successfully.`, jujuversion.Current)})
	}
	return outLogs
}
   555  
   556  func (s *UpgradeSuite) assertEnvironAgentVersion(c *gc.C, expected version.Number) {
   557  	envConfig, err := s.State.ModelConfig()
   558  	c.Assert(err, jc.ErrorIsNil)
   559  	agentVersion, ok := envConfig.AgentVersion()
   560  	c.Assert(ok, jc.IsTrue)
   561  	c.Assert(agentVersion, gc.Equals, expected)
   562  }
   563  
// NewFakeConfigSetter returns a fakeConfigSetter which implements
// just enough of the agent.ConfigSetter interface to keep the upgrade
// steps worker happy. The agent's tag and upgradedToVersion are the
// only pieces of state the worker reads or writes.
func NewFakeConfigSetter(agentTag names.Tag, initialVersion version.Number) *fakeConfigSetter {
	return &fakeConfigSetter{
		AgentTag: agentTag,
		Version:  initialVersion,
	}
}
   573  
// fakeConfigSetter embeds agent.ConfigSetter (so unimplemented
// methods panic if called) and overrides only Tag and the
// upgradedToVersion accessors. Version is exported so tests can
// assert on it directly.
type fakeConfigSetter struct {
	agent.ConfigSetter
	AgentTag names.Tag
	Version  version.Number
}
   579  
// Tag implements agent.Config.
func (s *fakeConfigSetter) Tag() names.Tag {
	return s.AgentTag
}
   583  
// UpgradedToVersion implements agent.Config.
func (s *fakeConfigSetter) UpgradedToVersion() version.Number {
	return s.Version
}
   587  
// SetUpgradedToVersion implements agent.ConfigSetter.
func (s *fakeConfigSetter) SetUpgradedToVersion(newVersion version.Number) {
	s.Version = newVersion
}
   591  
// NewFakeAgent returns a fakeAgent which implements the agent.Agent
// interface. This provides enough MachineAgent functionality to
// support upgrades.
func NewFakeAgent(confSetter agent.ConfigSetter) *fakeAgent {
	return &fakeAgent{
		config: confSetter,
	}
}
   600  
// fakeAgent wraps a single ConfigSetter to satisfy agent.Agent.
type fakeAgent struct {
	config agent.ConfigSetter
}
   604  
// CurrentConfig implements agent.Agent.
func (a *fakeAgent) CurrentConfig() agent.Config {
	return a.config
}
   608  
// ChangeConfig implements agent.Agent by applying mutate directly to
// the wrapped config (no locking or persistence, unlike the real agent).
func (a *fakeAgent) ChangeConfig(mutate agent.ConfigMutator) error {
	return mutate(a.config)
}
   612  
// StatusCall records one SetStatus invocation made by the worker.
type StatusCall struct {
	Status status.Status
	Info   string
}
   617  
// testStatusSetter captures machine status updates for later
// assertion against makeExpectedStatusCalls.
type testStatusSetter struct {
	Calls []StatusCall
}
   621  
// SetStatus records the status and info; the data map argument is
// deliberately ignored because no test inspects it.
func (s *testStatusSetter) SetStatus(status status.Status, info string, _ map[string]interface{}) error {
	s.Calls = append(s.Calls, StatusCall{status, info})
	return nil
}