github.com/altoros/juju-vmware@v0.0.0-20150312064031-f19ae857ccca/cmd/jujud/agent/machine_test.go

// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package agent

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/juju/cmd"
	"github.com/juju/errors"
	"github.com/juju/names"
	gitjujutesting "github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	"github.com/juju/utils/featureflag"
	"github.com/juju/utils/proxy"
	"github.com/juju/utils/set"
	"github.com/juju/utils/symlink"
	gc "gopkg.in/check.v1"
	"gopkg.in/juju/charm.v4"

	"github.com/juju/juju/agent"
	"github.com/juju/juju/api"
	apideployer "github.com/juju/juju/api/deployer"
	apienvironment "github.com/juju/juju/api/environment"
	apifirewaller "github.com/juju/juju/api/firewaller"
	apimetricsmanager "github.com/juju/juju/api/metricsmanager"
	apinetworker "github.com/juju/juju/api/networker"
	apirsyslog "github.com/juju/juju/api/rsyslog"
	charmtesting "github.com/juju/juju/apiserver/charmrevisionupdater/testing"
	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/cert"
	agenttesting "github.com/juju/juju/cmd/jujud/agent/testing"
	cmdutil "github.com/juju/juju/cmd/jujud/util"
	lxctesting "github.com/juju/juju/container/lxc/testing"
	"github.com/juju/juju/environs/config"
	envtesting "github.com/juju/juju/environs/testing"
	"github.com/juju/juju/instance"
	"github.com/juju/juju/juju"
	"github.com/juju/juju/juju/osenv"
	jujutesting "github.com/juju/juju/juju/testing"
	"github.com/juju/juju/mongo"
	"github.com/juju/juju/network"
	"github.com/juju/juju/provider/dummy"
	"github.com/juju/juju/service/upstart"
	"github.com/juju/juju/state"
	"github.com/juju/juju/state/watcher"
	"github.com/juju/juju/storage"
	coretesting "github.com/juju/juju/testing"
	"github.com/juju/juju/testing/factory"
	"github.com/juju/juju/tools"
	"github.com/juju/juju/utils/ssh"
	sshtesting "github.com/juju/juju/utils/ssh/testing"
	"github.com/juju/juju/version"
	"github.com/juju/juju/worker"
	"github.com/juju/juju/worker/authenticationworker"
	"github.com/juju/juju/worker/certupdater"
	"github.com/juju/juju/worker/deployer"
	"github.com/juju/juju/worker/diskmanager"
	"github.com/juju/juju/worker/instancepoller"
	"github.com/juju/juju/worker/networker"
	"github.com/juju/juju/worker/peergrouper"
	"github.com/juju/juju/worker/proxyupdater"
	"github.com/juju/juju/worker/rsyslog"
	"github.com/juju/juju/worker/singular"
	"github.com/juju/juju/worker/upgrader"
)

var (
	_ = gc.Suite(&MachineSuite{})
	_ = gc.Suite(&MachineWithCharmsSuite{})
	_ = gc.Suite(&mongoSuite{})
)

func TestPackage(t *testing.T) {
	// Change the default init dir in worker/deployer,
	// so the deployer doesn't try to remove upstart
	// jobs from tests.
	restore := gitjujutesting.PatchValue(&deployer.InitDir, mkdtemp("juju-worker-deployer"))
	defer restore()

	// TODO(waigani) 2014-03-19 bug 1294458
	// Refactor to use base suites

	// Change the path to "juju-run", so that the
	// tests don't try to write to /usr/local/bin.
	JujuRun = mktemp("juju-run", "")
	defer os.Remove(JujuRun)

	coretesting.MgoTestPackage(t)
}

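// commonMachineSuite is the fixture shared by the machine agent tests:
// it layers the LXC container and agent test suites, records the
// singular runners the agent starts, and fakes out mongo provisioning
// via agenttesting.FakeEnsure.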
type commonMachineSuite struct {
	singularRecord *singularRunnerRecord
	lxctesting.TestSuite
	fakeEnsureMongo agenttesting.FakeEnsure
	AgentSuite
}

func (s *commonMachineSuite) SetUpSuite(c *gc.C) {
	s.AgentSuite.SetUpSuite(c)
	s.TestSuite.SetUpSuite(c)
}

func (s *commonMachineSuite) TearDownSuite(c *gc.C) {
	s.TestSuite.TearDownSuite(c)
	s.AgentSuite.TearDownSuite(c)
}

func (s *commonMachineSuite) SetUpTest(c *gc.C) {
	s.AgentSuite.SetUpTest(c)
	s.TestSuite.SetUpTest(c)
	s.AgentSuite.PatchValue(&charm.CacheDir, c.MkDir())
	s.AgentSuite.PatchValue(&stateWorkerDialOpts, mongo.DialOpts{})

	os.Remove(JujuRun) // ignore error; may not exist
	// Patch ssh user to avoid touching ~ubuntu/.ssh/authorized_keys.
	s.AgentSuite.PatchValue(&authenticationworker.SSHUser, "")

	testpath := c.MkDir()
	s.AgentSuite.PatchEnvPathPrepend(testpath)
	// Mock out the start and stop commands so we can fake installing
	// services without sudo.
	fakeCmd(filepath.Join(testpath, "start"))
	fakeCmd(filepath.Join(testpath, "stop"))

	s.AgentSuite.PatchValue(&upstart.InitDir, c.MkDir())

	s.singularRecord = newSingularRunnerRecord()
	s.AgentSuite.PatchValue(&newSingularRunner, s.singularRecord.newSingularRunner)
	s.AgentSuite.PatchValue(&peergrouperNew, func(st *state.State) (worker.Worker, error) {
		return newDummyWorker(), nil
	})

	s.fakeEnsureMongo = agenttesting.FakeEnsure{}
	s.AgentSuite.PatchValue(&cmdutil.EnsureMongoServer, s.fakeEnsureMongo.FakeEnsureMongo)
	s.AgentSuite.PatchValue(&maybeInitiateMongoServer, s.fakeEnsureMongo.FakeInitiateMongo)
}

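// fakeCmd writes an executable stub script at path that exits
// successfully, so service start/stop invocations made by the tests
// are harmless no-ops.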
func fakeCmd(path string) {
	err := ioutil.WriteFile(path, []byte("#!/bin/bash --norc\nexit 0"), 0755)
	if err != nil {
		panic(err)
	}
}

func (s *commonMachineSuite) TearDownTest(c *gc.C) {
	s.TestSuite.TearDownTest(c)
	s.AgentSuite.TearDownTest(c)
}

// primeAgent adds a new Machine to run the given jobs, and sets up the
// machine agent's directory. It returns the new machine, the agent's
// configuration and the tools currently running.
func (s *commonMachineSuite) primeAgent(
	c *gc.C, vers version.Binary,
	jobs ...state.MachineJob) (m *state.Machine, agentConfig agent.ConfigSetterWriter, tools *tools.Tools) {

	m, err := s.State.AddMachine("quantal", jobs...)
	c.Assert(err, jc.ErrorIsNil)

	pinger, err := m.SetAgentPresence()
	c.Assert(err, jc.ErrorIsNil)
	s.AddCleanup(func(c *gc.C) {
		err := pinger.Stop()
		c.Check(err, jc.ErrorIsNil)
	})

	return s.configureMachine(c, m.Id(), vers)
}

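// configureMachine provisions the machine with the given id on the
// dummy environ, sets its agent version and passwords, and primes the
// agent's configuration on disk (including state serving info for
// manager machines).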
func (s *commonMachineSuite) configureMachine(c *gc.C, machineId string, vers version.Binary) (
	machine *state.Machine, agentConfig agent.ConfigSetterWriter, tools *tools.Tools,
) {
	m, err := s.State.Machine(machineId)
	c.Assert(err, jc.ErrorIsNil)

	// Start an instance and ensure the machine is provisioned.
	inst, md := jujutesting.AssertStartInstance(c, s.Environ, machineId)
	c.Assert(m.SetProvisioned(inst.Id(), agent.BootstrapNonce, md), jc.ErrorIsNil)

	// Add an address for the tests in case the maybeInitiateMongoServer
	// codepath is exercised.
	s.setFakeMachineAddresses(c, m)

	// Set up the new machine.
	err = m.SetAgentVersion(vers)
	c.Assert(err, jc.ErrorIsNil)
	err = m.SetPassword(initialMachinePassword)
	c.Assert(err, jc.ErrorIsNil)
	tag := m.Tag()
	if m.IsManager() {
		err = m.SetMongoPassword(initialMachinePassword)
		c.Assert(err, jc.ErrorIsNil)
		agentConfig, tools = s.AgentSuite.PrimeStateAgent(c, tag, initialMachinePassword, vers)
		info, ok := agentConfig.StateServingInfo()
		c.Assert(ok, jc.IsTrue)
		ssi := cmdutil.ParamsStateServingInfoToStateStateServingInfo(info)
		err = s.State.SetStateServingInfo(ssi)
		c.Assert(err, jc.ErrorIsNil)
	} else {
		agentConfig, tools = s.PrimeAgent(c, tag, initialMachinePassword, vers)
	}
	err = agentConfig.Write()
	c.Assert(err, jc.ErrorIsNil)
	return m, agentConfig, tools
}

// newAgent returns a new MachineAgent instance for the given machine.
func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
	agentConf := AgentConf{DataDir: s.DataDir()}
	agentConf.ReadConfig(names.NewMachineTag(m.Id()).String())
	machineAgentFactory := MachineAgentFactoryFn(&agentConf, &agentConf)
	return machineAgentFactory(m.Id())
}

func (s *MachineSuite) TestParseSuccess(c *gc.C) {
	create := func() (cmd.Command, *AgentConf) {
		agentConf := AgentConf{DataDir: s.DataDir()}
		a := NewMachineAgentCmd(
			MachineAgentFactoryFn(&agentConf, &agentConf),
			&agentConf,
			&agentConf,
		)
		a.(*machineAgentCmd).logToStdErr = true

		return a, &agentConf
	}
	a := CheckAgentCommand(c, create, []string{"--machine-id", "42"})
	c.Assert(a.(*machineAgentCmd).machineId, gc.Equals, "42")
}

type MachineSuite struct {
	commonMachineSuite
	metricAPI *mockMetricAPI
}

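// perEnvSingularWorkers lists the workers that run once per environment
// under the singular runner; the firewaller is the last of them to be
// started, which the tests below rely on when waiting for start-up.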
var perEnvSingularWorkers = []string{
	"cleaner",
	"minunitsworker",
	"environ-provisioner",
	"charm-revision-updater",
	"firewaller",
}

const initialMachinePassword = "machine-password-1234567890"

func (s *MachineSuite) NewMockMetricAPI() *mockMetricAPI {
	cleanup := make(chan struct{})
	sender := make(chan struct{})
	return &mockMetricAPI{cleanup, sender}
}

func (s *MachineSuite) SetUpTest(c *gc.C) {
	s.metricAPI = s.NewMockMetricAPI()
	s.PatchValue(&getMetricAPI, func(_ *api.State) apimetricsmanager.MetricsManagerClient {
		return s.metricAPI
	})
	s.commonMachineSuite.SetUpTest(c)
}

func (s *MachineSuite) TestParseNonsense(c *gc.C) {
	for _, args := range [][]string{
		{},
		{"--machine-id", "-4004"},
	} {
		var agentConf AgentConf
		err := ParseAgentCommand(&machineAgentCmd{agentInitializer: &agentConf}, args)
		c.Assert(err, gc.ErrorMatches, "--machine-id option must be set, and expects a non-negative integer")
	}
}

func (s *MachineSuite) TestParseUnknown(c *gc.C) {
	var agentConf AgentConf
	a := &machineAgentCmd{agentInitializer: &agentConf}
	err := ParseAgentCommand(a, []string{"--machine-id", "42", "blistering barnacles"})
	c.Assert(err, gc.ErrorMatches, `unrecognized args: \["blistering barnacles"\]`)
}

func (s *MachineSuite) TestRunInvalidMachineId(c *gc.C) {
	c.Skip("agents don't yet distinguish between temporary and permanent errors")
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := s.newAgent(c, m).Run(nil)
	c.Assert(err, gc.ErrorMatches, "some error")
}

func (s *MachineSuite) TestRunStop(c *gc.C) {
	m, ac, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()
	err := a.Stop()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(<-done, jc.ErrorIsNil)
	c.Assert(charm.CacheDir, gc.Equals, filepath.Join(ac.DataDir(), "charmcache"))
}

func (s *MachineSuite) TestWithDeadMachine(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := m.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	a := s.newAgent(c, m)
	err = runWithTimeout(a)
	c.Assert(err, jc.ErrorIsNil)
}

func (s *MachineSuite) TestWithRemovedMachine(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := m.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	err = m.Remove()
	c.Assert(err, jc.ErrorIsNil)
	a := s.newAgent(c, m)
	err = runWithTimeout(a)
	c.Assert(err, jc.ErrorIsNil)
}

func (s *MachineSuite) TestDyingMachine(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()
	defer func() {
		c.Check(a.Stop(), jc.ErrorIsNil)
	}()
	// Wait for configuration to be finished
	<-a.WorkersStarted()
	err := m.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(watcher.Period * 5 / 4):
		// TODO(rog) Fix this so it doesn't wait for so long.
		// https://bugs.launchpad.net/juju-core/+bug/1163983
		c.Fatalf("timed out waiting for agent to terminate")
	}
	err = m.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(m.Life(), gc.Equals, state.Dead)
}

func (s *MachineSuite) TestHostUnits(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	ctx, reset := patchDeployContext(c, s.BackingState)
	defer reset()
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	// check that unassigned units don't trigger any deployments.
	svc := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
	u0, err := svc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)
	u1, err := svc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)

	ctx.waitDeployed(c)

	// assign u0, check it's deployed.
	err = u0.AssignToMachine(m)
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u0.Name())

	// "start the agent" for u0 to prevent short-circuited remove-on-destroy;
	// check that it's kept deployed despite being Dying.
	err = u0.SetStatus(state.StatusActive, "", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = u0.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u0.Name())

	// add u1 to the machine, check it's deployed.
	err = u1.AssignToMachine(m)
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u0.Name(), u1.Name())

	// make u0 dead; check the deployer recalls the unit and removes it from
	// state.
	err = u0.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u1.Name())

	// The deployer actually removes the unit just after
	// removing its deployment, so we need to poll here
	// until it actually happens.
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		err := u0.Refresh()
		if err == nil && attempt.HasNext() {
			continue
		}
		c.Assert(err, jc.Satisfies, errors.IsNotFound)
	}

	// short-circuit-remove u1 after it's been deployed; check it's recalled
	// and removed from state.
	err = u1.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	err = u1.Refresh()
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	ctx.waitDeployed(c)
}

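// patchDeployContext replaces the deployer's context factory with a
// fakeContext, so tests can observe which units get deployed without
// running real deployers. The returned func restores the original.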
func patchDeployContext(c *gc.C, st *state.State) (*fakeContext, func()) {
	ctx := &fakeContext{
		inited:   make(chan struct{}),
		deployed: make(set.Strings),
	}
	orig := newDeployContext
	newDeployContext = func(dst *apideployer.State, agentConfig agent.Config) deployer.Context {
		ctx.st = st
		ctx.agentConfig = agentConfig
		close(ctx.inited)
		return ctx
	}
	return ctx, func() { newDeployContext = orig }
}

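// setFakeMachineAddresses gives the machine a fixed address both in
// state and on the corresponding dummy provider instance, so the
// instance poller (if it runs) won't overwrite it.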
func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
	addrs := []network.Address{
		network.NewAddress("0.1.2.3", network.ScopeUnknown),
	}
	err := machine.SetAddresses(addrs...)
	c.Assert(err, jc.ErrorIsNil)
	// Set the addresses in the environ instance as well so that if the instance poller
	// runs it won't overwrite them.
	instId, err := machine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	insts, err := s.Environ.Instances([]instance.Id{instId})
	c.Assert(err, jc.ErrorIsNil)
	dummy.SetInstanceAddresses(insts[0], addrs)
}

func (s *MachineSuite) TestManageEnviron(c *gc.C) {
	usefulVersion := version.Current
	usefulVersion.Series = "quantal" // to match the charm created below
	envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	op := make(chan dummy.Operation, 200)
	dummy.Listen(op)

	a := s.newAgent(c, m)
	// Make sure the agent is stopped even if the test fails.
	defer a.Stop()
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()

	// See state server runners start
	r0 := s.singularRecord.nextRunner(c)
	r0.waitForWorker(c, "resumer")

	r1 := s.singularRecord.nextRunner(c)
	lastWorker := perEnvSingularWorkers[len(perEnvSingularWorkers)-1]
	r1.waitForWorker(c, lastWorker)

	// Check that the provisioner and firewaller are alive by doing
	// a rudimentary check that they respond to state changes.

	// Add one unit to a service; it should get allocated a machine
	// and then its ports should be opened.
	charm := s.AddTestingCharm(c, "dummy")
	svc := s.AddTestingService(c, "test-service", charm)
	err := svc.SetExposed()
	c.Assert(err, jc.ErrorIsNil)
	units, err := juju.AddUnits(s.State, svc, 1, "")
	c.Assert(err, jc.ErrorIsNil)
	c.Check(opRecvTimeout(c, s.State, op, dummy.OpStartInstance{}), gc.NotNil)

	// Wait for the instance id to show up in the state.
	s.waitProvisioned(c, units[0])
	err = units[0].OpenPort("tcp", 999)
	c.Assert(err, jc.ErrorIsNil)

	c.Check(opRecvTimeout(c, s.State, op, dummy.OpOpenPorts{}), gc.NotNil)

	// Check that the metrics workers have started by adding metrics
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for metric cleanup API to be called")
	case <-s.metricAPI.CleanupCalled():
	}
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for metric sender API to be called")
	case <-s.metricAPI.SendCalled():
	}

	err = a.Stop()
	c.Assert(err, jc.ErrorIsNil)

	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(5 * time.Second):
		c.Fatalf("timed out waiting for agent to terminate")
	}
}

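// startWorkerWait is how long tests wait before concluding that a
// worker has (correctly) not been started.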
const startWorkerWait = 250 * time.Millisecond

func (s *MachineSuite) TestManageEnvironDoesNotRunFirewallerWhenModeIsNone(c *gc.C) {
	s.PatchValue(&getFirewallMode, func(*api.State) (string, error) {
		return config.FwNone, nil
	})
	started := make(chan struct{})
	s.AgentSuite.PatchValue(&newFirewaller, func(st *apifirewaller.State) (worker.Worker, error) {
		close(started)
		return newDummyWorker(), nil
	})

	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()

	// Wait for the worker that starts before the firewaller to start.
	_ = s.singularRecord.nextRunner(c)
	r := s.singularRecord.nextRunner(c)
	r.waitForWorker(c, "charm-revision-updater")

	// Now make sure the firewaller doesn't start.
	select {
	case <-started:
		c.Fatalf("firewaller worker unexpectedly started")
	case <-time.After(startWorkerWait):
	}
}

func (s *MachineSuite) TestManageEnvironRunsInstancePoller(c *gc.C) {
	s.AgentSuite.PatchValue(&instancepoller.ShortPoll, 500*time.Millisecond)
	usefulVersion := version.Current
	usefulVersion.Series = "quantal" // to match the charm created below
	envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()

	// Add one unit to a service;
	charm := s.AddTestingCharm(c, "dummy")
	svc := s.AddTestingService(c, "test-service", charm)
	units, err := juju.AddUnits(s.State, svc, 1, "")
	c.Assert(err, jc.ErrorIsNil)

	m, instId := s.waitProvisioned(c, units[0])
	insts, err := s.Environ.Instances([]instance.Id{instId})
	c.Assert(err, jc.ErrorIsNil)
	addrs := []network.Address{network.NewAddress("1.2.3.4", network.ScopeUnknown)}
	dummy.SetInstanceAddresses(insts[0], addrs)
	dummy.SetInstanceStatus(insts[0], "running")

	for a := coretesting.LongAttempt.Start(); a.Next(); {
		if !a.HasNext() {
			c.Logf("final machine addresses: %#v", m.Addresses())
			c.Fatalf("timed out waiting for machine to get address")
		}
		err := m.Refresh()
		c.Assert(err, jc.ErrorIsNil)
		instStatus, err := m.InstanceStatus()
		c.Assert(err, jc.ErrorIsNil)
		if reflect.DeepEqual(m.Addresses(), addrs) && instStatus == "running" {
			break
		}
	}
}

func (s *MachineSuite) TestManageEnvironRunsPeergrouper(c *gc.C) {
	started := make(chan struct{}, 1)
	s.AgentSuite.PatchValue(&peergrouperNew, func(st *state.State) (worker.Worker, error) {
		c.Check(st, gc.NotNil)
		select {
		case started <- struct{}{}:
		default:
		}
		return newDummyWorker(), nil
	})
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()
	select {
	case <-started:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for peergrouper worker to be started")
	}
}

func (s *MachineSuite) TestManageEnvironCallsUseMultipleCPUs(c *gc.C) {
	// If it has been enabled, the JobManageEnviron agent should call utils.UseMultipleCPUs
	usefulVersion := version.Current
	usefulVersion.Series = "quantal"
	envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	calledChan := make(chan struct{}, 1)
	s.AgentSuite.PatchValue(&useMultipleCPUs, func() { calledChan <- struct{}{} })
	// Now, start the agent, and observe that a JobManageEnviron agent
	// calls UseMultipleCPUs
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()
	// Wait for configuration to be finished
	<-a.WorkersStarted()
	select {
	case <-calledChan:
	case <-time.After(coretesting.LongWait):
		c.Errorf("we failed to call UseMultipleCPUs()")
	}
	c.Check(a.Stop(), jc.ErrorIsNil)
	// However, an agent with just JobHostUnits doesn't call UseMultipleCPUs
	m2, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a2 := s.newAgent(c, m2)
	defer a2.Stop()
	go func() {
		c.Check(a2.Run(nil), jc.ErrorIsNil)
	}()
	// Wait until all the workers have been started, and then kill everything
	<-a2.workersStarted
	c.Check(a2.Stop(), jc.ErrorIsNil)
	select {
	case <-calledChan:
		c.Errorf("we should not have called UseMultipleCPUs()")
	case <-time.After(coretesting.ShortWait):
	}
}

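// waitProvisioned waits until the machine assigned to the given unit
// has been provisioned with an instance id, and returns the machine
// and that instance id.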
func (s *MachineSuite) waitProvisioned(c *gc.C, unit *state.Unit) (*state.Machine, instance.Id) {
	c.Logf("waiting for unit %q to be provisioned", unit)
	machineId, err := unit.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)
	m, err := s.State.Machine(machineId)
	c.Assert(err, jc.ErrorIsNil)
	w := m.Watch()
	defer w.Stop()
	timeout := time.After(coretesting.LongWait)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for provisioning")
		case _, ok := <-w.Changes():
			c.Assert(ok, jc.IsTrue)
			err := m.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			if instId, err := m.InstanceId(); err == nil {
				c.Logf("unit provisioned with instance %s", instId)
				return m, instId
			} else {
				c.Check(err, jc.Satisfies, errors.IsNotProvisioned)
			}
		}
	}
}

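// testUpgradeRequest bumps the environment's agent version past the
// agent's own and checks that the agent terminates with an
// UpgradeReadyError naming the old and new tools.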
func (s *MachineSuite) testUpgradeRequest(c *gc.C, agent runner, tag string, currentTools *tools.Tools) {
	newVers := version.Current
	newVers.Patch++
	newTools := envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), newVers)[0]
	err := s.State.SetEnvironAgentVersion(newVers.Number)
	c.Assert(err, jc.ErrorIsNil)
	err = runWithTimeout(agent)
	envtesting.CheckUpgraderReadyError(c, err, &upgrader.UpgradeReadyError{
		AgentName: tag,
		OldTools:  currentTools.Version,
		NewTools:  newTools.Version,
		DataDir:   s.DataDir(),
	})
}

func (s *MachineSuite) TestUpgradeRequest(c *gc.C) {
	m, _, currentTools := s.primeAgent(c, version.Current, state.JobManageEnviron, state.JobHostUnits)
	a := s.newAgent(c, m)
	s.testUpgradeRequest(c, a, m.Tag().String(), currentTools)
}

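// fastDialOpts are API dial options tuned for tests: a short retry
// delay within an overall LongWait timeout.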
var fastDialOpts = api.DialOpts{
	Timeout:    coretesting.LongWait,
	RetryDelay: coretesting.ShortWait,
}

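// waitStopped stops the agent and waits for its Run goroutine to
// finish, tolerating shutdown races when the agent was running the
// API server.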
func (s *MachineSuite) waitStopped(c *gc.C, job state.MachineJob, a *MachineAgent, done chan error) {
	err := a.Stop()
	if job == state.JobManageEnviron {
		// When shutting down, the API server can be shut down before
		// the other workers that connect to it, so those workers get an
		// error and die, causing Stop to return an error. It's not easy
		// to control the actual error that's received in this
		// circumstance, so we just log it rather than asserting that it
		// is not nil.
		if err != nil {
			c.Logf("error shutting down state manager: %v", err)
		}
	} else {
		c.Assert(err, jc.ErrorIsNil)
	}

	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(5 * time.Second):
		c.Fatalf("timed out waiting for agent to terminate")
	}
}

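// assertJobWithAPI starts a machine agent with the given job and calls
// test with the agent's config and the API connection the agent opened.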
func (s *MachineSuite) assertJobWithAPI(
	c *gc.C,
	job state.MachineJob,
	test func(agent.Config, *api.State),
) {
	s.assertAgentOpensState(c, &reportOpenedAPI, job, func(cfg agent.Config, st interface{}) {
		test(cfg, st.(*api.State))
	})
}

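// assertJobWithState is like assertJobWithAPI but hands the test
// function the agent's direct *state.State connection; it requires a
// job that needs state.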
func (s *MachineSuite) assertJobWithState(
	c *gc.C,
	job state.MachineJob,
	test func(agent.Config, *state.State),
) {
	paramsJob := job.ToParams()
	if !paramsJob.NeedsState() {
		c.Fatalf("%v does not use state", paramsJob)
	}
	s.assertAgentOpensState(c, &reportOpenedState, job, func(cfg agent.Config, st interface{}) {
		test(cfg, st.(*state.State))
	})
}

// assertAgentOpensState asserts that a machine agent started with the
// given job will call the function pointed to by reportOpened. The
// agent's configuration and the value passed to reportOpened are then
// passed to the test function for further checking.
func (s *MachineSuite) assertAgentOpensState(
	c *gc.C,
	reportOpened *func(interface{}),
	job state.MachineJob,
	test func(agent.Config, interface{}),
) {
	stm, conf, _ := s.primeAgent(c, version.Current, job)
	a := s.newAgent(c, stm)
	defer a.Stop()
	logger.Debugf("new agent %#v", a)

	// All state jobs currently also run an APIWorker, so no
	// need to check for that here, like in assertJobWithState.

	agentAPIs := make(chan interface{}, 1)
	s.AgentSuite.PatchValue(reportOpened, func(st interface{}) {
		select {
		case agentAPIs <- st:
		default:
		}
	})

	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()

	select {
	case agentAPI := <-agentAPIs:
		c.Assert(agentAPI, gc.NotNil)
		test(conf, agentAPI)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("API not opened")
	}

	s.waitStopped(c, job, a, done)
}

func (s *MachineSuite) TestManageEnvironServesAPI(c *gc.C) {
	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
		st, err := api.Open(conf.APIInfo(), fastDialOpts)
		c.Assert(err, jc.ErrorIsNil)
		defer st.Close()
		m, err := st.Machiner().Machine(conf.Tag().(names.MachineTag))
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(m.Life(), gc.Equals, params.Alive)
	})
}

func (s *MachineSuite) assertAgentSetsToolsVersion(c *gc.C, job state.MachineJob) {
	vers := version.Current
	vers.Minor = version.Current.Minor + 1
	m, _, _ := s.primeAgent(c, vers, job)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	timeout := time.After(coretesting.LongWait)
	for done := false; !done; {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for agent version to be set")
		case <-time.After(coretesting.ShortWait):
			c.Log("Refreshing")
			err := m.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			c.Log("Fetching agent tools")
			agentTools, err := m.AgentTools()
			c.Assert(err, jc.ErrorIsNil)
			c.Logf("(%v vs. %v)", agentTools.Version, version.Current)
			if agentTools.Version.Minor != version.Current.Minor {
				continue
			}
			c.Assert(agentTools.Version, gc.DeepEquals, version.Current)
			done = true
		}
	}
}

func (s *MachineSuite) TestAgentSetsToolsVersionManageEnviron(c *gc.C) {
	s.assertAgentSetsToolsVersion(c, state.JobManageEnviron)
}

func (s *MachineSuite) TestAgentSetsToolsVersionHostUnits(c *gc.C) {
	s.assertAgentSetsToolsVersion(c, state.JobHostUnits)
}

func (s *MachineSuite) TestManageEnvironRunsCleaner(c *gc.C) {
	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
		// Create a service and unit, and destroy the service.
		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
		unit, err := service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		err = service.Destroy()
		c.Assert(err, jc.ErrorIsNil)

		// Check the unit was not yet removed.
		err = unit.Refresh()
		c.Assert(err, jc.ErrorIsNil)
		w := unit.Watch()
		defer w.Stop()

		// Trigger a sync on the state used by the agent, and wait
		// for the unit to be removed.
		agentState.StartSync()
		timeout := time.After(coretesting.LongWait)
		for done := false; !done; {
			select {
			case <-timeout:
				c.Fatalf("unit not cleaned up")
			case <-time.After(coretesting.ShortWait):
				s.State.StartSync()
			case <-w.Changes():
				err := unit.Refresh()
				if errors.IsNotFound(err) {
					done = true
				} else {
					c.Assert(err, jc.ErrorIsNil)
				}
			}
		}
	})
}

func (s *MachineSuite) TestJobManageEnvironRunsMinUnitsWorker(c *gc.C) {
	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
		// Ensure that the MinUnits worker is alive by doing a simple check
		// that it responds to state changes: add a service, set its minimum
		// number of units to one, wait for the worker to add the missing unit.
		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
		err := service.SetMinUnits(1)
		c.Assert(err, jc.ErrorIsNil)
		w := service.Watch()
		defer w.Stop()

		// Trigger a sync on the state used by the agent, and wait for the unit
		// to be created.
		agentState.StartSync()
		timeout := time.After(coretesting.LongWait)
		for {
			select {
			case <-timeout:
				c.Fatalf("unit not created")
			case <-time.After(coretesting.ShortWait):
				s.State.StartSync()
			case <-w.Changes():
				units, err := service.AllUnits()
				c.Assert(err, jc.ErrorIsNil)
				if len(units) == 1 {
					return
				}
			}
		}
	})
}

func (s *MachineSuite) TestMachineAgentRunsAuthorisedKeysWorker(c *gc.C) {
	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	// Update the keys in the environment.
	sshKey := sshtesting.ValidKeyOne.Key + " user@host"
	err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": sshKey}, nil, nil)
	c.Assert(err, jc.ErrorIsNil)

	// Wait for ssh keys file to be updated.
	s.State.StartSync()
	timeout := time.After(coretesting.LongWait)
	sshKeyWithCommentPrefix := sshtesting.ValidKeyOne.Key + " Juju:user@host"
	for {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for authorised ssh keys to change")
		case <-time.After(coretesting.ShortWait):
			keys, err := ssh.ListKeys(authenticationworker.SSHUser, ssh.FullKeys)
			c.Assert(err, jc.ErrorIsNil)
			keysStr := strings.Join(keys, "\n")
			if sshKeyWithCommentPrefix != keysStr {
				continue
			}
			return
		}
	}
}

// opRecvTimeout waits for any of the given kinds of operation to
// be received from ops, and times out if not.
func opRecvTimeout(c *gc.C, st *state.State, opc <-chan dummy.Operation, kinds ...dummy.Operation) dummy.Operation {
	st.StartSync()
	for {
		select {
		case op := <-opc:
			for _, k := range kinds {
				if reflect.TypeOf(op) == reflect.TypeOf(k) {
					return op
				}
			}
			c.Logf("discarding unknown event %#v", op)
		case <-time.After(15 * time.Second):
			c.Fatalf("timed out waiting for operation")
		}
	}
}

func (s *MachineSuite) TestOpenStateFailsForJobHostUnitsButOpenAPIWorks(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	s.RunTestOpenAPIState(c, m, a, initialMachinePassword)
	s.assertJobWithAPI(c, state.JobHostUnits, func(conf agent.Config, st *api.State) {
		s.AssertCannotOpenState(c, conf.Tag(), conf.DataDir())
	})
}

func (s *MachineSuite) TestOpenStateWorksForJobManageEnviron(c *gc.C) {
	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st *api.State) {
		s.AssertCanOpenState(c, conf.Tag(), conf.DataDir())
	})
}

func (s *MachineSuite) TestMachineAgentSymlinkJujuRun(c *gc.C) {
	_, err := os.Stat(JujuRun)
	c.Assert(err, jc.Satisfies, os.IsNotExist)
	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st *api.State) {
		// juju-run should have been created
		_, err := os.Stat(JujuRun)
		c.Assert(err, jc.ErrorIsNil)
	})
}

func (s *MachineSuite) TestMachineAgentSymlinkJujuRunExists(c *gc.C) {
	err := symlink.New("/nowhere/special", JujuRun)
	c.Assert(err, jc.ErrorIsNil)
	_, err = os.Stat(JujuRun)
	c.Assert(err, jc.Satisfies, os.IsNotExist)
	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st *api.State) {
		// juju-run should have been recreated
		_, err := os.Stat(JujuRun)
		c.Assert(err, jc.ErrorIsNil)
		link, err := symlink.Read(JujuRun)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(link, gc.Not(gc.Equals), "/nowhere/special")
	})
}

func (s *MachineSuite) TestProxyUpdater(c *gc.C) {
	s.assertProxyUpdater(c, true)
	s.assertProxyUpdater(c, false)
}

func (s *MachineSuite) assertProxyUpdater(c *gc.C, expectWriteSystemFiles bool) {
	// Patch out the func that decides whether we should write system files.
	var gotConf agent.Config
	s.AgentSuite.PatchValue(&shouldWriteProxyFiles, func(conf agent.Config) bool {
		gotConf = conf
		return expectWriteSystemFiles
	})

	// Make sure there are some proxy settings to write.
	expectSettings := proxy.Settings{
		Http:  "http proxy",
		Https: "https proxy",
		Ftp:   "ftp proxy",
	}
	updateAttrs := config.ProxyConfigMap(expectSettings)
	err := s.State.UpdateEnvironConfig(updateAttrs, nil, nil)
	c.Assert(err, jc.ErrorIsNil)

	// Patch out the actual worker func.
	started := make(chan struct{})
	mockNew := func(api *apienvironment.Facade, writeSystemFiles bool) worker.Worker {
		// Direct check of the behaviour flag.
		c.Check(writeSystemFiles, gc.Equals, expectWriteSystemFiles)
		// Indirect check that we get a functional API.
		conf, err := api.EnvironConfig()
		if c.Check(err, jc.ErrorIsNil) {
			actualSettings := conf.ProxySettings()
			c.Check(actualSettings, jc.DeepEquals, expectSettings)
		}
		return worker.NewSimpleWorker(func(_ <-chan struct{}) error {
			close(started)
			return nil
		})
	}
	s.AgentSuite.PatchValue(&proxyupdater.New, mockNew)

	s.primeAgent(c, version.Current, state.JobHostUnits)
	s.assertJobWithAPI(c, state.JobHostUnits, func(conf agent.Config, st *api.State) {
		for {
			select {
			case <-time.After(coretesting.LongWait):
				c.Fatalf("timeout while waiting for proxy updater to start")
			case <-started:
				c.Assert(gotConf, jc.DeepEquals, conf)
				return
			}
		}
	})
}

func (s *MachineSuite) TestMachineAgentUninstall(c *gc.C) {
	m, ac, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := m.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	a := s.newAgent(c, m)
	err = runWithTimeout(a)
	c.Assert(err, jc.ErrorIsNil)
	// juju-run should have been removed on termination
	_, err = os.Stat(JujuRun)
	c.Assert(err, jc.Satisfies, os.IsNotExist)
	// data-dir should have been removed on termination
	_, err = os.Stat(ac.DataDir())
	c.Assert(err, jc.Satisfies, os.IsNotExist)
}

func (s *MachineSuite) TestMachineAgentRsyslogManageEnviron(c *gc.C) {
	s.testMachineAgentRsyslogConfigWorker(c, state.JobManageEnviron, rsyslog.RsyslogModeAccumulate)
}

func (s *MachineSuite) TestMachineAgentRsyslogHostUnits(c *gc.C) {
	s.testMachineAgentRsyslogConfigWorker(c, state.JobHostUnits, rsyslog.RsyslogModeForwarding)
}

func (s *MachineSuite) testMachineAgentRsyslogConfigWorker(c *gc.C, job state.MachineJob, expectedMode rsyslog.RsyslogMode) {
	created := make(chan rsyslog.RsyslogMode, 1)
	s.AgentSuite.PatchValue(&cmdutil.NewRsyslogConfigWorker, func(_ *apirsyslog.State, _ agent.Config, mode rsyslog.RsyslogMode) (worker.Worker, error) {
		created <- mode
		return newDummyWorker(), nil
	})
	s.assertJobWithAPI(c, job, func(conf agent.Config, st *api.State) {
		select {
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timeout while waiting for rsyslog worker to be created")
		case mode := <-created:
			c.Assert(mode, gc.Equals, expectedMode)
		}
	})
}

func (s *MachineSuite) TestMachineAgentRunsAPIAddressUpdaterWorker(c *gc.C) {
	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	// Update the API addresses.
	updatedServers := [][]network.HostPort{
		network.NewHostPorts(1234, "localhost"),
	}
	err := s.BackingState.SetAPIHostPorts(updatedServers)
	c.Assert(err, jc.ErrorIsNil)

	// Wait for config to be updated.
	s.BackingState.StartSync()
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		addrs, err := a.CurrentConfig().APIAddresses()
		c.Assert(err, jc.ErrorIsNil)
		if reflect.DeepEqual(addrs, []string{"localhost:1234"}) {
			return
		}
	}
	c.Fatalf("timeout while waiting for agent config to change")
}

func (s *MachineSuite) TestMachineAgentRunsDiskManagerWorker(c *gc.C) {
	// The disk manager should only run with the feature flag set.
	s.testMachineAgentRunsDiskManagerWorker(c, false, coretesting.ShortWait)

	s.PatchEnvironment(osenv.JujuFeatureFlagEnvKey, "storage")
	featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)
	s.testMachineAgentRunsDiskManagerWorker(c, true, coretesting.LongWait)
}

func (s *MachineSuite) testMachineAgentRunsDiskManagerWorker(c *gc.C, shouldRun bool, timeout time.Duration) {
	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	started := make(chan struct{})
	newWorker := func(diskmanager.ListBlockDevicesFunc, diskmanager.BlockDeviceSetter) worker.Worker {
		close(started)
		return worker.NewNoOpWorker()
	}
	s.PatchValue(&newDiskManager, newWorker)

	// Wait for worker to be started.
	select {
	case <-started:
		if !shouldRun {
			c.Fatalf("disk manager should not run without feature flag")
		}
	case <-time.After(timeout):
		if shouldRun {
			c.Fatalf("timeout while waiting for diskmanager worker to start")
		}
	}
}

func (s *MachineSuite) TestDiskManagerWorkerUpdatesState(c *gc.C) {
	s.PatchEnvironment(osenv.JujuFeatureFlagEnvKey, "storage")
	featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)

	expected := []storage.BlockDevice{{DeviceName: "whatever"}}
	s.PatchValue(&diskmanager.DefaultListBlockDevices, func() ([]storage.BlockDevice, error) {
		return expected, nil
	})

	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	// Wait for state to be updated.
	s.BackingState.StartSync()
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		devices, err := m.BlockDevices()
		c.Assert(err, jc.ErrorIsNil)
		if len(devices) > 0 {
			c.Assert(devices, gc.HasLen, 1)
			info, err := devices[0].Info()
			c.Assert(err, jc.ErrorIsNil)
			c.Assert(info.DeviceName, gc.Equals, expected[0].DeviceName)
			return
		}
	}
	c.Fatalf("timeout while waiting for block devices to be recorded")
}

func (s *MachineSuite) TestMachineAgentRunsCertificateUpdateWorkerForStateServer(c *gc.C) {
	started := make(chan struct{})
	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
		certupdater.StateServingInfoSetter, chan params.StateServingInfo,
	) worker.Worker {
		close(started)
		return worker.NewNoOpWorker()
	}
	s.PatchValue(&newCertificateUpdater, newUpdater)

	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	// Wait for worker to be started.
	select {
	case <-started:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout while waiting for certificate update worker to start")
	}
}

func (s *MachineSuite) TestMachineAgentDoesNotRunCertificateUpdateWorkerForNonStateServer(c *gc.C) {
  1213  	started := make(chan struct{})
  1214  	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
  1215  		certupdater.StateServingInfoSetter, chan params.StateServingInfo,
  1216  	) worker.Worker {
  1217  		close(started)
  1218  		return worker.NewNoOpWorker()
  1219  	}
  1220  	s.PatchValue(&newCertificateUpdater, newUpdater)
  1221  
  1222  	// Start the machine agent.
  1223  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1224  	a := s.newAgent(c, m)
  1225  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1226  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1227  
  1228  	// Ensure the worker is not started.
  1229  	select {
  1230  	case <-started:
  1231  		c.Fatalf("certificate update worker unexpectedly started")
  1232  	case <-time.After(coretesting.ShortWait):
  1233  	}
  1234  }
  1235  
  1236  func (s *MachineSuite) TestCertificateUpdateWorkerUpdatesCertificate(c *gc.C) {
  1237  	// Set up the machine agent.
  1238  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1239  	a := s.newAgent(c, m)
  1240  	a.ReadConfig(names.NewMachineTag(m.Id()).String())
  1241  
  1242  	// Set up check that certificate has been updated.
  1243  	updated := make(chan struct{})
  1244  	go func() {
  1245  		for {
  1246  			stateInfo, _ := a.CurrentConfig().StateServingInfo()
  1247  			srvCert, err := cert.ParseCert(stateInfo.Cert)
  1248  			c.Assert(err, jc.ErrorIsNil)
  1249  			sanIPs := make([]string, len(srvCert.IPAddresses))
  1250  			for i, ip := range srvCert.IPAddresses {
  1251  				sanIPs[i] = ip.String()
  1252  			}
  1253  			if len(sanIPs) == 1 && sanIPs[0] == "0.1.2.3" {
  1254  				close(updated)
  1255  				break
  1256  			}
  1257  			time.Sleep(10 * time.Millisecond)
  1258  		}
  1259  	}()
  1260  
  1261  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1262  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1263  	// Wait for certificate to be updated.
  1264  	select {
  1265  	case <-updated:
  1266  	case <-time.After(coretesting.LongWait):
  1267  		c.Fatalf("timeout while waiting for certificate to be updated")
  1268  	}
  1269  }
  1270  
  1271  func (s *MachineSuite) TestMachineAgentNetworkerMode(c *gc.C) {
  1272  	tests := []struct {
  1273  		about          string
  1274  		managedNetwork bool
  1275  		jobs           []state.MachineJob
  1276  		intrusiveMode  bool
  1277  	}{{
  1278  		about:          "network management enabled, network management job set",
  1279  		managedNetwork: true,
  1280  		jobs:           []state.MachineJob{state.JobHostUnits, state.JobManageNetworking},
  1281  		intrusiveMode:  true,
  1282  	}, {
  1283  		about:          "network management disabled, network management job set",
  1284  		managedNetwork: false,
  1285  		jobs:           []state.MachineJob{state.JobHostUnits, state.JobManageNetworking},
  1286  		intrusiveMode:  false,
  1287  	}, {
  1288  		about:          "network management enabled, network management job not set",
  1289  		managedNetwork: true,
  1290  		jobs:           []state.MachineJob{state.JobHostUnits},
  1291  		intrusiveMode:  false,
  1292  	}, {
  1293  		about:          "network management disabled, network management job not set",
  1294  		managedNetwork: false,
  1295  		jobs:           []state.MachineJob{state.JobHostUnits},
  1296  		intrusiveMode:  false,
  1297  	}}
  1298  	// Perform tests.
  1299  	for i, test := range tests {
  1300  		c.Logf("test #%d: %s", i, test.about)
  1301  
  1302  		modeCh := make(chan bool, 1)
  1303  		s.AgentSuite.PatchValue(&newNetworker, func(
  1304  			st *apinetworker.State,
  1305  			conf agent.Config,
  1306  			intrusiveMode bool,
  1307  			configBaseDir string,
  1308  		) (*networker.Networker, error) {
  1309  			select {
  1310  			case modeCh <- intrusiveMode:
  1311  			default:
  1312  			}
  1313  			return networker.NewNetworker(st, conf, intrusiveMode, configBaseDir)
  1314  		})
  1315  
  1316  		attrs := coretesting.Attrs{"disable-network-management": !test.managedNetwork}
  1317  		err := s.BackingState.UpdateEnvironConfig(attrs, nil, nil)
  1318  		c.Assert(err, jc.ErrorIsNil)
  1319  
  1320  		m, _, _ := s.primeAgent(c, version.Current, test.jobs...)
  1321  		a := s.newAgent(c, m)
  1322  		defer a.Stop()
  1323  		doneCh := make(chan error)
  1324  		go func() {
  1325  			doneCh <- a.Run(nil)
  1326  		}()
  1327  
  1328  		select {
  1329  		case intrusiveMode := <-modeCh:
  1330  			if intrusiveMode != test.intrusiveMode {
  1331  				c.Fatalf("expected networker intrusive mode = %v, got mode = %v", test.intrusiveMode, intrusiveMode)
  1332  			}
  1333  		case <-time.After(coretesting.LongWait):
  1334  			c.Fatalf("timed out waiting for the networker to start")
  1335  		}
  1336  		s.waitStopped(c, state.JobManageNetworking, a, doneCh)
  1337  	}
  1338  }
  1339  
  1340  func (s *MachineSuite) TestMachineAgentUpgradeMongo(c *gc.C) {
  1341  	m, agentConfig, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1342  	agentConfig.SetUpgradedToVersion(version.MustParse("1.18.0"))
  1343  	err := agentConfig.Write()
  1344  	c.Assert(err, jc.ErrorIsNil)
  1345  	err = s.State.MongoSession().DB("admin").RemoveUser(m.Tag().String())
  1346  	c.Assert(err, jc.ErrorIsNil)
  1347  
  1348  	s.AgentSuite.PatchValue(&ensureMongoAdminUser, func(p mongo.EnsureAdminUserParams) (bool, error) {
  1349  		err := s.State.MongoSession().DB("admin").AddUser(p.User, p.Password, false)
  1350  		c.Assert(err, jc.ErrorIsNil)
  1351  		return true, nil
  1352  	})
  1353  
  1354  	stateOpened := make(chan interface{}, 1)
  1355  	s.AgentSuite.PatchValue(&reportOpenedState, func(st interface{}) {
  1356  		select {
  1357  		case stateOpened <- st:
  1358  		default:
  1359  		}
  1360  	})
  1361  
  1362  	// Start the machine agent, and wait for state to be opened.
  1363  	a := s.newAgent(c, m)
  1364  	done := make(chan error)
  1365  	go func() { done <- a.Run(nil) }()
  1366  	defer a.Stop() // in case of failure
  1367  	select {
  1368  	case st := <-stateOpened:
  1369  		c.Assert(st, gc.NotNil)
  1370  	case <-time.After(coretesting.LongWait):
  1371  		c.Fatalf("state not opened")
  1372  	}
  1373  	s.waitStopped(c, state.JobManageEnviron, a, done)
  1374  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  1375  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 1)
  1376  }
  1377  
  1378  func (s *MachineSuite) TestMachineAgentSetsPrepareRestore(c *gc.C) {
  1379  	// Start the machine agent.
  1380  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1381  	a := s.newAgent(c, m)
  1382  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1383  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1384  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1385  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1386  	err := a.PrepareRestore()
  1387  	c.Assert(err, jc.ErrorIsNil)
  1388  	c.Assert(a.IsRestorePreparing(), jc.IsTrue)
  1389  	c.Assert(a.IsRestoreRunning(), jc.IsFalse)
  1390  	err = a.PrepareRestore()
  1391  	c.Assert(err, gc.ErrorMatches, "already in restore mode")
  1392  }
  1393  
  1394  func (s *MachineSuite) TestMachineAgentSetsRestoreInProgress(c *gc.C) {
  1395  	// Start the machine agent.
  1396  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1397  	a := s.newAgent(c, m)
  1398  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1399  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1400  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1401  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1402  	err := a.PrepareRestore()
  1403  	c.Assert(err, jc.ErrorIsNil)
  1404  	c.Assert(a.IsRestorePreparing(), jc.IsTrue)
  1405  	err = a.BeginRestore()
  1406  	c.Assert(err, jc.ErrorIsNil)
  1407  	c.Assert(a.IsRestoreRunning(), jc.IsTrue)
  1408  	err = a.BeginRestore()
  1409  	c.Assert(err, gc.ErrorMatches, "already restoring")
  1410  }
  1411  
  1412  func (s *MachineSuite) TestMachineAgentRestoreRequiresPrepare(c *gc.C) {
  1413  	// Start the machine agent.
  1414  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1415  	a := s.newAgent(c, m)
  1416  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1417  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1418  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1419  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1420  	err := a.BeginRestore()
  1421  	c.Assert(err, gc.ErrorMatches, "not in restore mode, cannot begin restoration")
  1422  	c.Assert(a.IsRestoreRunning(), jc.IsFalse)
  1423  }
  1424  
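        // TestNewEnvironmentStartsNewWorkers checks that an
        // environment-manager agent runs the per-environment singular
        // workers for the initial environment, and starts another full set
        // when a new environment is created while it is running.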
  1425  func (s *MachineSuite) TestNewEnvironmentStartsNewWorkers(c *gc.C) {
  1426  	s.PatchValue(&watcher.Period, 100*time.Millisecond)
  1427  
  1428  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1429  	a := s.newAgent(c, m)
  1430  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1431  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1432  
  1433  	_ = s.singularRecord.nextRunner(c) // Don't care about this one for this test.
  1434  
  1435  	// Wait for the workers for the initial env to start. The
  1436  	// firewaller is the last worker started for a new environment.
  1437  	r0 := s.singularRecord.nextRunner(c)
  1438  	workers := r0.waitForWorker(c, "firewaller")
  1439  	c.Assert(workers, jc.DeepEquals, perEnvSingularWorkers)
  1440  
  1441  	// Now create a new environment and see the workers start for it.
  1442  	factory.NewFactory(s.State).MakeEnvironment(c, &factory.EnvParams{
  1443  		ConfigAttrs: map[string]interface{}{
  1444  			"state-server": false,
  1445  		},
  1446  		Prepare: true,
  1447  	}).Close()
  1448  	r1 := s.singularRecord.nextRunner(c)
  1449  	workers = r1.waitForWorker(c, "firewaller")
  1450  	c.Assert(workers, jc.DeepEquals, perEnvSingularWorkers)
  1451  }
  1452  
  1453  // MachineWithCharmsSuite provides infrastructure for tests which need to
  1454  // work with charms.
  1455  type MachineWithCharmsSuite struct {
  1456  	commonMachineSuite
  1457  	charmtesting.CharmSuite
  1458  
  1459  	machine *state.Machine
  1460  }
  1461  
  1462  func (s *MachineWithCharmsSuite) SetUpSuite(c *gc.C) {
  1463  	s.commonMachineSuite.SetUpSuite(c)
  1464  	s.CharmSuite.SetUpSuite(c, &s.commonMachineSuite.JujuConnSuite)
  1465  }
  1466  
  1467  func (s *MachineWithCharmsSuite) TearDownSuite(c *gc.C) {
  1468  	s.CharmSuite.TearDownSuite(c)
  1469  	s.commonMachineSuite.TearDownSuite(c)
  1470  }
  1471  
  1472  func (s *MachineWithCharmsSuite) SetUpTest(c *gc.C) {
  1473  	s.commonMachineSuite.SetUpTest(c)
  1474  	s.CharmSuite.SetUpTest(c)
  1475  }
  1476  
  1477  func (s *MachineWithCharmsSuite) TearDownTest(c *gc.C) {
  1478  	s.CharmSuite.TearDownTest(c)
  1479  	s.commonMachineSuite.TearDownTest(c)
  1480  }
  1481  
  1482  func (s *MachineWithCharmsSuite) TestManageEnvironRunsCharmRevisionUpdater(c *gc.C) {
  1483  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1484  
  1485  	s.SetupScenario(c)
  1486  
  1487  	a := s.newAgent(c, m)
  1488  	go func() {
  1489  		c.Check(a.Run(nil), jc.ErrorIsNil)
  1490  	}()
  1491  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1492  
  1493  	checkRevision := func() bool {
  1494  		curl := charm.MustParseURL("cs:quantal/mysql")
  1495  		placeholder, err := s.State.LatestPlaceholderCharm(curl)
  1496  		return err == nil && placeholder.String() == curl.WithRevision(23).String()
  1497  	}
  1498  	success := false
  1499  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
  1500  		if success = checkRevision(); success {
  1501  			break
  1502  		}
  1503  	}
  1504  	c.Assert(success, jc.IsTrue)
  1505  }
  1506  
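        // mongoSuite checks the mongo dial options used by the state
        // worker against a real mongod instance.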
  1507  type mongoSuite struct {
  1508  	coretesting.BaseSuite
  1509  }
  1510  
  1511  func (s *mongoSuite) TestStateWorkerDialSetsWriteMajority(c *gc.C) {
  1512  	s.testStateWorkerDialSetsWriteMajority(c, true)
  1513  }
  1514  
  1515  func (s *mongoSuite) TestStateWorkerDialDoesNotSetWriteMajorityWithoutReplsetConfig(c *gc.C) {
  1516  	s.testStateWorkerDialSetsWriteMajority(c, false)
  1517  }
  1518  
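        // testStateWorkerDialSetsWriteMajority starts a mongod configured
        // for a replica set, optionally initiates that replica set, then
        // dials it with stateWorkerDialOpts (directly, when the replica
        // set is left uninitiated) and checks that the session's write
        // mode is "majority" exactly when the replica set was initiated.
        // Journaling is expected in both cases.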
  1519  func (s *mongoSuite) testStateWorkerDialSetsWriteMajority(c *gc.C, configureReplset bool) {
  1520  	inst := gitjujutesting.MgoInstance{
  1521  		EnableJournal: true,
  1522  		Params:        []string{"--replSet", "juju"},
  1523  	}
  1524  	err := inst.Start(coretesting.Certs)
  1525  	c.Assert(err, jc.ErrorIsNil)
  1526  	defer inst.Destroy()
  1527  
  1528  	var expectedWMode string
  1529  	dialOpts := stateWorkerDialOpts
  1530  	if configureReplset {
  1531  		info := inst.DialInfo()
  1532  		args := peergrouper.InitiateMongoParams{
  1533  			DialInfo:       info,
  1534  			MemberHostPort: inst.Addr(),
  1535  		}
  1536  		err = peergrouper.MaybeInitiateMongoServer(args)
  1537  		c.Assert(err, jc.ErrorIsNil)
  1538  		expectedWMode = "majority"
  1539  	} else {
  1540  		dialOpts.Direct = true
  1541  	}
  1542  
  1543  	mongoInfo := mongo.Info{
  1544  		Addrs:  []string{inst.Addr()},
  1545  		CACert: coretesting.CACert,
  1546  	}
  1547  	session, err := mongo.DialWithInfo(mongoInfo, dialOpts)
  1548  	c.Assert(err, jc.ErrorIsNil)
  1549  	defer session.Close()
  1550  
  1551  	safe := session.Safe()
  1552  	c.Assert(safe, gc.NotNil)
  1553  	c.Assert(safe.WMode, gc.Equals, expectedWMode)
  1554  	c.Assert(safe.J, jc.IsTrue) // always enabled
  1555  }
  1556  
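        // shouldWriteProxyFilesSuite exercises shouldWriteProxyFiles,
        // which decides whether an agent writes system-wide proxy settings
        // files: every machine does except machine 0 under the local
        // provider, presumably because that machine is the user's own
        // host.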
  1557  type shouldWriteProxyFilesSuite struct {
  1558  	coretesting.BaseSuite
  1559  }
  1560  
  1561  var _ = gc.Suite(&shouldWriteProxyFilesSuite{})
  1562  
  1563  func (s *shouldWriteProxyFilesSuite) TestAll(c *gc.C) {
  1564  	tests := []struct {
  1565  		description  string
  1566  		providerType string
  1567  		machineId    string
  1568  		expect       bool
  1569  	}{{
  1570  		description:  "local provider machine 0 must not write",
  1571  		providerType: "local",
  1572  		machineId:    "0",
  1573  		expect:       false,
  1574  	}, {
  1575  		description:  "local provider, container machine must write",
  1576  		providerType: "local",
  1577  		machineId:    "0/kvm/0",
  1578  		expect:       true,
  1579  	}, {
  1580  		description:  "local provider, other machine must write",
  1581  		providerType: "local",
  1582  		machineId:    "123",
  1583  		expect:       true,
  1584  	}, {
  1585  		description:  "other provider machine 0 must write",
  1586  		providerType: "anything",
  1587  		machineId:    "0",
  1588  		expect:       true,
  1589  	}, {
  1590  		description:  "other provider, container machine must write",
  1591  		providerType: "dummy",
  1592  		machineId:    "0/kvm/0",
  1593  		expect:       true,
  1594  	}, {
  1595  		description:  "other provider, other machine must write",
  1596  		providerType: "blahblahblah",
  1597  		machineId:    "123",
  1598  		expect:       true,
  1599  	}}
  1600  	for i, test := range tests {
  1601  		c.Logf("test %d: %s", i, test.description)
  1602  		mockConf := &mockAgentConfig{
  1603  			providerType: test.providerType,
  1604  			tag:          names.NewMachineTag(test.machineId),
  1605  		}
  1606  		c.Check(shouldWriteProxyFiles(mockConf), gc.Equals, test.expect)
  1607  	}
  1608  }
  1609  
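        // mockAgentConfig stubs out the two parts of agent.Config that
        // shouldWriteProxyFiles consults: the entity tag and the
        // provider-type value.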
  1610  type mockAgentConfig struct {
  1611  	agent.Config
  1612  	providerType string
  1613  	tag          names.Tag
  1614  }
  1615  
  1616  func (m *mockAgentConfig) Tag() names.Tag {
  1617  	return m.tag
  1618  }
  1619  
  1620  func (m *mockAgentConfig) Value(key string) string {
  1621  	if key == agent.ProviderType {
  1622  		return m.providerType
  1623  	}
  1624  	return ""
  1625  }
  1626  
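        // singularRunnerRecord remembers each singular runner created via
        // newSingularRunner, so that tests can pick them up with
        // nextRunner and watch which workers they start.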
  1627  type singularRunnerRecord struct {
  1628  	runnerC chan *fakeSingularRunner
  1629  }
  1630  
  1631  func newSingularRunnerRecord() *singularRunnerRecord {
  1632  	return &singularRunnerRecord{
  1633  		runnerC: make(chan *fakeSingularRunner, 5),
  1634  	}
  1635  }
  1636  
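        // newSingularRunner wraps the singular runner created by
        // singular.New in a fakeSingularRunner and records it for
        // retrieval via nextRunner. It is presumably patched in as the
        // agent's singular-runner constructor by the test setup, given how
        // s.singularRecord is consulted above.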
  1637  func (r *singularRunnerRecord) newSingularRunner(runner worker.Runner, conn singular.Conn) (worker.Runner, error) {
  1638  	sr, err := singular.New(runner, conn)
  1639  	if err != nil {
  1640  		return nil, err
  1641  	}
  1642  	fakeRunner := &fakeSingularRunner{
  1643  		Runner: sr,
  1644  		startC: make(chan string, 64),
  1645  	}
  1646  	r.runnerC <- fakeRunner
  1647  	return fakeRunner, nil
  1648  }
  1649  
  1650  // nextRunner blocks until a new singular runner is created.
  1651  func (r *singularRunnerRecord) nextRunner(c *gc.C) *fakeSingularRunner {
  1652  	for {
  1653  		select {
  1654  		case runner := <-r.runnerC:
  1655  			return runner
  1656  		case <-time.After(coretesting.LongWait):
  1657  			c.Fatal("timed out waiting for singular runner to be created")
  1658  		}
  1659  	}
  1660  }
  1661  
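        // fakeSingularRunner wraps a worker.Runner, reporting the name of
        // every worker started through it on startC.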
  1662  type fakeSingularRunner struct {
  1663  	worker.Runner
  1664  	startC chan string
  1665  }
  1666  
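        // StartWorker records the worker's name before delegating to the
        // wrapped Runner.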
  1667  func (r *fakeSingularRunner) StartWorker(name string, start func() (worker.Worker, error)) error {
  1668  	r.startC <- name
  1669  	return r.Runner.StartWorker(name, start)
  1670  }
  1671  
  1672  // waitForWorker waits for a given worker to be started, returning all
  1673  // workers started while waiting.
  1674  func (r *fakeSingularRunner) waitForWorker(c *gc.C, target string) []string {
  1675  	var seen []string
  1676  	timeout := time.After(coretesting.LongWait)
  1677  	for {
  1678  		select {
  1679  		case workerName := <-r.startC:
  1680  			seen = append(seen, workerName)
  1681  			if workerName == target {
  1682  				return seen
  1683  			}
  1684  		case <-timeout:
  1685  			c.Fatal("timed out waiting for " + target)
  1686  		}
  1687  	}
  1688  }
  1689  
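        // newDummyWorker returns a worker that does nothing until it is
        // asked to stop.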
  1690  func newDummyWorker() worker.Worker {
  1691  	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
  1692  		<-stop
  1693  		return nil
  1694  	})
  1695  }
  1696  
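        // mockMetricAPI is a minimal stand-in for the metrics manager API
        // used by these tests: it just signals on its channels when
        // CleanupOldMetrics and SendMetrics are called. The sends happen
        // in separate goroutines so the calls themselves never block.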
  1697  type mockMetricAPI struct {
  1698  	cleanUpCalled chan struct{}
  1699  	sendCalled    chan struct{}
  1700  }
  1701  
  1702  func (m *mockMetricAPI) CleanupOldMetrics() error {
  1703  	go func() {
  1704  		m.cleanUpCalled <- struct{}{}
  1705  	}()
  1706  	return nil
  1707  }

  1708  func (m *mockMetricAPI) SendMetrics() error {
  1709  	go func() {
  1710  		m.sendCalled <- struct{}{}
  1711  	}()
  1712  	return nil
  1713  }
  1714  
  1715  func (m *mockMetricAPI) SendCalled() <-chan struct{} {
  1716  	return m.sendCalled
  1717  }
  1718  
  1719  func (m *mockMetricAPI) CleanupCalled() <-chan struct{} {
  1720  	return m.cleanUpCalled
  1721  }
  1722  
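        // mkdtemp creates a temporary directory and panics on failure,
        // making it usable where no *gc.C is available for assertions.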
  1723  func mkdtemp(prefix string) string {
  1724  	d, err := ioutil.TempDir("", prefix)
  1725  	if err != nil {
  1726  		panic(err)
  1727  	}
  1728  	return d
  1729  }
  1730  
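        // mktemp creates a temporary file with the given content and
        // returns its name, panicking on any failure.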
  1731  func mktemp(prefix string, content string) string {
  1732  	f, err := ioutil.TempFile("", prefix)
  1733  	if err != nil {
  1734  		panic(err)
  1735  	}
  1736  	_, err = f.WriteString(content)
  1737  	if err != nil {
  1738  		panic(err)
  1739  	}
  1740  	f.Close()
  1741  	return f.Name()
  1742  }
  1743  
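        // runner is the Run/Stop surface, shared by the agent commands
        // under test, that runWithTimeout requires.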
  1744  type runner interface {
  1745  	Run(*cmd.Context) error
  1746  	Stop() error
  1747  }
  1748  
  1749  // runWithTimeout runs an agent and waits
  1750  // for it to complete within a reasonable time.
  1751  func runWithTimeout(r runner) error {
  1752  	done := make(chan error)
  1753  	go func() {
  1754  		done <- r.Run(nil)
  1755  	}()
  1756  	select {
  1757  	case err := <-done:
  1758  		return err
  1759  	case <-time.After(coretesting.LongWait):
  1760  	}
  1761  	err := r.Stop()
  1762  	return fmt.Errorf("timed out waiting for agent to finish; stop error: %v", err)
  1763  }
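
        // A typical (hypothetical) call site, given an agent built with
        // s.newAgent as in the tests above:
        //
        //	a := s.newAgent(c, m)
        //	c.Assert(runWithTimeout(a), jc.ErrorIsNil)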