github.com/Pankov404/juju@v0.0.0-20150703034450-be266991dceb/cmd/jujud/agent/machine_test.go

// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package agent

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/juju/cmd"
	"github.com/juju/errors"
	"github.com/juju/names"
	gitjujutesting "github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	"github.com/juju/utils/proxy"
	"github.com/juju/utils/set"
	"github.com/juju/utils/symlink"
	gc "gopkg.in/check.v1"
	"gopkg.in/juju/charm.v5"
	"gopkg.in/juju/charm.v5/charmrepo"
	"gopkg.in/natefinch/lumberjack.v2"

	"github.com/juju/juju/agent"
	"github.com/juju/juju/api"
	apideployer "github.com/juju/juju/api/deployer"
	apienvironment "github.com/juju/juju/api/environment"
	apifirewaller "github.com/juju/juju/api/firewaller"
	apiinstancepoller "github.com/juju/juju/api/instancepoller"
	apimetricsmanager "github.com/juju/juju/api/metricsmanager"
	apinetworker "github.com/juju/juju/api/networker"
	apirsyslog "github.com/juju/juju/api/rsyslog"
	charmtesting "github.com/juju/juju/apiserver/charmrevisionupdater/testing"
	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/cert"
	agenttesting "github.com/juju/juju/cmd/jujud/agent/testing"
	cmdutil "github.com/juju/juju/cmd/jujud/util"
	lxctesting "github.com/juju/juju/container/lxc/testing"
	"github.com/juju/juju/environs/config"
	envtesting "github.com/juju/juju/environs/testing"
	"github.com/juju/juju/feature"
	"github.com/juju/juju/instance"
	"github.com/juju/juju/juju"
	jujutesting "github.com/juju/juju/juju/testing"
	"github.com/juju/juju/mongo"
	"github.com/juju/juju/network"
	"github.com/juju/juju/provider/dummy"
	"github.com/juju/juju/service/upstart"
	"github.com/juju/juju/state"
	"github.com/juju/juju/state/watcher"
	"github.com/juju/juju/storage"
	coretesting "github.com/juju/juju/testing"
	"github.com/juju/juju/testing/factory"
	"github.com/juju/juju/tools"
	"github.com/juju/juju/utils/ssh"
	sshtesting "github.com/juju/juju/utils/ssh/testing"
	"github.com/juju/juju/version"
	"github.com/juju/juju/worker"
	"github.com/juju/juju/worker/authenticationworker"
	"github.com/juju/juju/worker/certupdater"
	"github.com/juju/juju/worker/deployer"
	"github.com/juju/juju/worker/diskmanager"
	"github.com/juju/juju/worker/instancepoller"
	"github.com/juju/juju/worker/networker"
	"github.com/juju/juju/worker/peergrouper"
	"github.com/juju/juju/worker/proxyupdater"
	"github.com/juju/juju/worker/resumer"
	"github.com/juju/juju/worker/rsyslog"
	"github.com/juju/juju/worker/singular"
	"github.com/juju/juju/worker/storageprovisioner"
	"github.com/juju/juju/worker/upgrader"
)

var (
	_ = gc.Suite(&MachineSuite{})
	_ = gc.Suite(&MachineWithCharmsSuite{})
	_ = gc.Suite(&mongoSuite{})
)

func TestPackage(t *testing.T) {
	// TODO(waigani) 2014-03-19 bug 1294458
	// Refactor to use base suites

	// Change the path to "juju-run", so that the
	// tests don't try to write to /usr/local/bin.
	JujuRun = mktemp("juju-run", "")
	defer os.Remove(JujuRun)

	coretesting.MgoTestPackage(t)
}

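// commonMachineSuite is the fixture shared by the machine agent suites:
// it combines AgentSuite with the LXC container test suite, fakes the
// mongo setup, and records singular-runner creation for later inspection.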
type commonMachineSuite struct {
	singularRecord *singularRunnerRecord
	lxctesting.TestSuite
	fakeEnsureMongo *agenttesting.FakeEnsureMongo
	AgentSuite
}

func (s *commonMachineSuite) SetUpSuite(c *gc.C) {
	s.AgentSuite.SetUpSuite(c)
	s.TestSuite.SetUpSuite(c)
}

func (s *commonMachineSuite) TearDownSuite(c *gc.C) {
	s.TestSuite.TearDownSuite(c)
	s.AgentSuite.TearDownSuite(c)
}

func (s *commonMachineSuite) SetUpTest(c *gc.C) {
	s.AgentSuite.SetUpTest(c)
	s.TestSuite.SetUpTest(c)
	s.AgentSuite.PatchValue(&charmrepo.CacheDir, c.MkDir())
	s.AgentSuite.PatchValue(&stateWorkerDialOpts, mongo.DialOpts{})

	os.Remove(JujuRun) // ignore error; may not exist
	// Patch ssh user to avoid touching ~ubuntu/.ssh/authorized_keys.
	s.AgentSuite.PatchValue(&authenticationworker.SSHUser, "")

	testpath := c.MkDir()
	s.AgentSuite.PatchEnvPathPrepend(testpath)
	// mock out the start and stop commands so we can fake installing
	// services without sudo
	fakeCmd(filepath.Join(testpath, "start"))
	fakeCmd(filepath.Join(testpath, "stop"))

	s.AgentSuite.PatchValue(&upstart.InitDir, c.MkDir())

	s.singularRecord = newSingularRunnerRecord()
	s.AgentSuite.PatchValue(&newSingularRunner, s.singularRecord.newSingularRunner)
	s.AgentSuite.PatchValue(&peergrouperNew, func(st *state.State) (worker.Worker, error) {
		return newDummyWorker(), nil
	})

	s.fakeEnsureMongo = agenttesting.InstallFakeEnsureMongo(s)
	s.AgentSuite.PatchValue(&maybeInitiateMongoServer, s.fakeEnsureMongo.InitiateMongo)
}

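// fakeCmd writes an executable script to path that does nothing and
// exits successfully, standing in for commands the agent would
// otherwise need privileges to run.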
func fakeCmd(path string) {
	err := ioutil.WriteFile(path, []byte("#!/bin/bash --norc\nexit 0"), 0755)
	if err != nil {
		panic(err)
	}
}

func (s *commonMachineSuite) TearDownTest(c *gc.C) {
	s.TestSuite.TearDownTest(c)
	s.AgentSuite.TearDownTest(c)
}

// primeAgent adds a new Machine to run the given jobs, and sets up the
// machine agent's directory. It returns the new machine, the agent's
// configuration and the tools currently running.
func (s *commonMachineSuite) primeAgent(
	c *gc.C, vers version.Binary,
	jobs ...state.MachineJob) (m *state.Machine, agentConfig agent.ConfigSetterWriter, tools *tools.Tools) {

	m, err := s.State.AddMachine("quantal", jobs...)
	c.Assert(err, jc.ErrorIsNil)

	pinger, err := m.SetAgentPresence()
	c.Assert(err, jc.ErrorIsNil)
	s.AddCleanup(func(c *gc.C) {
		err := pinger.Stop()
		c.Check(err, jc.ErrorIsNil)
	})

	return s.configureMachine(c, m.Id(), vers)
}

func (s *commonMachineSuite) configureMachine(c *gc.C, machineId string, vers version.Binary) (
	machine *state.Machine, agentConfig agent.ConfigSetterWriter, tools *tools.Tools,
) {
	m, err := s.State.Machine(machineId)
	c.Assert(err, jc.ErrorIsNil)

	// Add a machine and ensure it is provisioned.
	inst, md := jujutesting.AssertStartInstance(c, s.Environ, machineId)
	c.Assert(m.SetProvisioned(inst.Id(), agent.BootstrapNonce, md), jc.ErrorIsNil)

	// Add an address for the tests in case the maybeInitiateMongoServer
	// codepath is exercised.
	s.setFakeMachineAddresses(c, m)

	// Set up the new machine.
	err = m.SetAgentVersion(vers)
	c.Assert(err, jc.ErrorIsNil)
	err = m.SetPassword(initialMachinePassword)
	c.Assert(err, jc.ErrorIsNil)
	tag := m.Tag()
	if m.IsManager() {
		err = m.SetMongoPassword(initialMachinePassword)
		c.Assert(err, jc.ErrorIsNil)
		agentConfig, tools = s.AgentSuite.PrimeStateAgent(c, tag, initialMachinePassword, vers)
		info, ok := agentConfig.StateServingInfo()
		c.Assert(ok, jc.IsTrue)
		ssi := cmdutil.ParamsStateServingInfoToStateStateServingInfo(info)
		err = s.State.SetStateServingInfo(ssi)
		c.Assert(err, jc.ErrorIsNil)
	} else {
		agentConfig, tools = s.PrimeAgent(c, tag, initialMachinePassword, vers)
	}
	err = agentConfig.Write()
	c.Assert(err, jc.ErrorIsNil)
	return m, agentConfig, tools
}

// newAgent returns a new MachineAgent instance for the given machine.
func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
	agentConf := agentConf{dataDir: s.DataDir()}
	agentConf.ReadConfig(names.NewMachineTag(m.Id()).String())
	machineAgentFactory := MachineAgentFactoryFn(&agentConf, &agentConf)
	return machineAgentFactory(m.Id())
}

func (s *MachineSuite) TestParseSuccess(c *gc.C) {
	create := func() (cmd.Command, AgentConf) {
		agentConf := agentConf{dataDir: s.DataDir()}
		a := NewMachineAgentCmd(
			nil,
			MachineAgentFactoryFn(&agentConf, &agentConf),
			&agentConf,
			&agentConf,
		)
		a.(*machineAgentCmd).logToStdErr = true

		return a, &agentConf
	}
	a := CheckAgentCommand(c, create, []string{"--machine-id", "42"})
	c.Assert(a.(*machineAgentCmd).machineId, gc.Equals, "42")
}

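// MachineSuite runs the machine agent tests end to end against the
// dummy provider, with a mocked-out metrics manager API.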
type MachineSuite struct {
	commonMachineSuite
	metricAPI *mockMetricAPI
}

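// perEnvSingularWorkers are the per-environment workers that the tests
// expect the singular runner to start for a state server.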
var perEnvSingularWorkers = []string{
	"cleaner",
	"minunitsworker",
	"addresserworker",
	"environ-provisioner",
	"charm-revision-updater",
	"instancepoller",
	"firewaller",
}

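// initialMachinePassword is the password primeAgent assigns to each new
// machine (and to its mongo user, for managers).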
const initialMachinePassword = "machine-password-1234567890"

func (s *MachineSuite) SetUpTest(c *gc.C) {
	s.commonMachineSuite.SetUpTest(c)
	s.metricAPI = newMockMetricAPI()
	s.PatchValue(&getMetricAPI, func(_ *api.State) apimetricsmanager.MetricsManagerClient {
		return s.metricAPI
	})
	s.AddCleanup(func(*gc.C) { s.metricAPI.Stop() })
	// Most of these tests normally finish sub-second on a fast machine.
	// If any given test hits a minute, we have almost certainly become
	// wedged, so dump the logs.
	coretesting.DumpTestLogsAfter(time.Minute, c, s)
}

func (s *MachineSuite) TestParseNonsense(c *gc.C) {
	for _, args := range [][]string{
		{},
		{"--machine-id", "-4004"},
	} {
		var agentConf agentConf
		err := ParseAgentCommand(&machineAgentCmd{agentInitializer: &agentConf}, args)
		c.Assert(err, gc.ErrorMatches, "--machine-id option must be set, and expects a non-negative integer")
	}
}

func (s *MachineSuite) TestParseUnknown(c *gc.C) {
	var agentConf agentConf
	a := &machineAgentCmd{agentInitializer: &agentConf}
	err := ParseAgentCommand(a, []string{"--machine-id", "42", "blistering barnacles"})
	c.Assert(err, gc.ErrorMatches, `unrecognized args: \["blistering barnacles"\]`)
}

func (s *MachineSuite) TestRunInvalidMachineId(c *gc.C) {
	c.Skip("agents don't yet distinguish between temporary and permanent errors")
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := s.newAgent(c, m).Run(nil)
	c.Assert(err, gc.ErrorMatches, "some error")
}

func (s *MachineSuite) TestUseLumberjack(c *gc.C) {
	ctx, err := cmd.DefaultContext()
	c.Assert(err, gc.IsNil)

	agentConf := FakeAgentConfig{}

	a := NewMachineAgentCmd(
		ctx,
		MachineAgentFactoryFn(agentConf, agentConf),
		agentConf,
		agentConf,
	)
	// little hack to set the data that Init expects to already be set
	a.(*machineAgentCmd).machineId = "42"

	err = a.Init(nil)
	c.Assert(err, gc.IsNil)

	l, ok := ctx.Stderr.(*lumberjack.Logger)
	c.Assert(ok, jc.IsTrue)
	c.Check(l.MaxAge, gc.Equals, 0)
	c.Check(l.MaxBackups, gc.Equals, 2)
	c.Check(l.Filename, gc.Equals, filepath.FromSlash("/var/log/juju/machine-42.log"))
	c.Check(l.MaxSize, gc.Equals, 300)
}

func (s *MachineSuite) TestDontUseLumberjack(c *gc.C) {
	ctx, err := cmd.DefaultContext()
	c.Assert(err, gc.IsNil)

	agentConf := FakeAgentConfig{}

	a := NewMachineAgentCmd(
		ctx,
		MachineAgentFactoryFn(agentConf, agentConf),
		agentConf,
		agentConf,
	)
	// little hack to set the data that Init expects to already be set
	a.(*machineAgentCmd).machineId = "42"

	// set the value that normally gets set by the flag parsing
	a.(*machineAgentCmd).logToStdErr = true

	err = a.Init(nil)
	c.Assert(err, gc.IsNil)

	_, ok := ctx.Stderr.(*lumberjack.Logger)
	c.Assert(ok, jc.IsFalse)
}

func (s *MachineSuite) TestRunStop(c *gc.C) {
	m, ac, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()
	err := a.Stop()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(<-done, jc.ErrorIsNil)
	c.Assert(charmrepo.CacheDir, gc.Equals, filepath.Join(ac.DataDir(), "charmcache"))
}

func (s *MachineSuite) TestWithDeadMachine(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := m.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	a := s.newAgent(c, m)
	err = runWithTimeout(a)
	c.Assert(err, jc.ErrorIsNil)
}

func (s *MachineSuite) TestWithRemovedMachine(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := m.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	err = m.Remove()
	c.Assert(err, jc.ErrorIsNil)
	a := s.newAgent(c, m)
	err = runWithTimeout(a)
	c.Assert(err, jc.ErrorIsNil)
}

func (s *MachineSuite) TestDyingMachine(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()
	defer func() {
		c.Check(a.Stop(), jc.ErrorIsNil)
	}()
	// Wait for configuration to be finished
	<-a.WorkersStarted()
	err := m.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(watcher.Period * 5 / 4):
		// TODO(rog) Fix this so it doesn't wait for so long.
		// https://bugs.launchpad.net/juju-core/+bug/1163983
		c.Fatalf("timed out waiting for agent to terminate")
	}
	err = m.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(m.Life(), gc.Equals, state.Dead)
}

func (s *MachineSuite) TestHostUnits(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	ctx, reset := patchDeployContext(c, s.BackingState)
	defer reset()
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	// check that unassigned units don't trigger any deployments.
	svc := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
	u0, err := svc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)
	u1, err := svc.AddUnit()
	c.Assert(err, jc.ErrorIsNil)

	ctx.waitDeployed(c)

	// assign u0, check it's deployed.
	err = u0.AssignToMachine(m)
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u0.Name())

	// "start the agent" for u0 to prevent short-circuited remove-on-destroy;
	// check that it's kept deployed despite being Dying.
	err = u0.SetAgentStatus(state.StatusIdle, "", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = u0.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u0.Name())

	// add u1 to the machine, check it's deployed.
	err = u1.AssignToMachine(m)
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u0.Name(), u1.Name())

	// make u0 dead; check the deployer recalls the unit and removes it from
	// state.
	err = u0.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	ctx.waitDeployed(c, u1.Name())

	// The deployer actually removes the unit just after
	// removing its deployment, so we need to poll here
	// until it actually happens.
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		err := u0.Refresh()
		if err == nil && attempt.HasNext() {
			continue
		}
		c.Assert(err, jc.Satisfies, errors.IsNotFound)
	}

	// short-circuit-remove u1 after it's been deployed; check it's recalled
	// and removed from state.
	err = u1.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	err = u1.Refresh()
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	ctx.waitDeployed(c)
}

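// patchDeployContext replaces newDeployContext with a factory that
// hands out a recording fakeContext, returning that context along with
// a function that restores the original factory.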
func patchDeployContext(c *gc.C, st *state.State) (*fakeContext, func()) {
	ctx := &fakeContext{
		inited:   make(chan struct{}),
		deployed: make(set.Strings),
	}
	orig := newDeployContext
	newDeployContext = func(dst *apideployer.State, agentConfig agent.Config) deployer.Context {
		ctx.st = st
		ctx.agentConfig = agentConfig
		close(ctx.inited)
		return ctx
	}
	return ctx, func() { newDeployContext = orig }
}

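// setFakeMachineAddresses gives the machine a known provider address,
// both in state and on the dummy instance itself.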
func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
	addrs := network.NewAddresses("0.1.2.3")
	err := machine.SetProviderAddresses(addrs...)
	c.Assert(err, jc.ErrorIsNil)
	// Set the addresses in the environ instance as well so that if the instance poller
	// runs it won't overwrite them.
	instId, err := machine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	insts, err := s.Environ.Instances([]instance.Id{instId})
	c.Assert(err, jc.ErrorIsNil)
	dummy.SetInstanceAddresses(insts[0], addrs)
}

func (s *MachineSuite) TestManageEnviron(c *gc.C) {
	usefulVersion := version.Current
	usefulVersion.Series = "quantal" // to match the charm created below
	envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	op := make(chan dummy.Operation, 200)
	dummy.Listen(op)

	a := s.newAgent(c, m)
	// Make sure the agent is stopped even if the test fails.
	defer a.Stop()
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()

	// See the state server runners start.
	r0 := s.singularRecord.nextRunner(c)
	r0.waitForWorker(c, "txnpruner")

	r1 := s.singularRecord.nextRunner(c)
	r1.waitForWorkers(c, perEnvSingularWorkers)

	// Check that the provisioner and firewaller are alive by doing
	// a rudimentary check that they respond to state changes.

	// Add one unit to a service; it should get allocated a machine
	// and then its ports should be opened.
	charm := s.AddTestingCharm(c, "dummy")
	svc := s.AddTestingService(c, "test-service", charm)
	err := svc.SetExposed()
	c.Assert(err, jc.ErrorIsNil)
	units, err := juju.AddUnits(s.State, svc, 1, "")
	c.Assert(err, jc.ErrorIsNil)
	c.Check(opRecvTimeout(c, s.State, op, dummy.OpStartInstance{}), gc.NotNil)

	// Wait for the instance id to show up in the state.
	s.waitProvisioned(c, units[0])
	err = units[0].OpenPort("tcp", 999)
	c.Assert(err, jc.ErrorIsNil)

	c.Check(opRecvTimeout(c, s.State, op, dummy.OpOpenPorts{}), gc.NotNil)

	// Check that the metrics workers have started by waiting for the
	// metric cleanup and sender APIs to be called.
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for metric cleanup API to be called")
	case <-s.metricAPI.CleanupCalled():
	}
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for metric sender API to be called")
	case <-s.metricAPI.SendCalled():
	}

	err = a.Stop()
	c.Assert(err, jc.ErrorIsNil)

	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(5 * time.Second):
		c.Fatalf("timed out waiting for agent to terminate")
	}
}

func (s *MachineSuite) TestManageEnvironRunsResumer(c *gc.C) {
	started := make(chan struct{})
	s.AgentSuite.PatchValue(&newResumer, func(st resumer.TransactionResumer) *resumer.Resumer {
		close(started)
		return resumer.NewResumer(st)
	})

	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()

	// Wait for the worker that starts before the resumer to start.
	_ = s.singularRecord.nextRunner(c)
	r := s.singularRecord.nextRunner(c)
	r.waitForWorker(c, "charm-revision-updater")

	// Now make sure the resumer starts.
	select {
	case <-started:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("resumer worker not started as expected")
	}
}

func (s *MachineSuite) TestManageEnvironStartsInstancePoller(c *gc.C) {
	started := make(chan struct{})
	s.AgentSuite.PatchValue(&newInstancePoller, func(st *apiinstancepoller.API) worker.Worker {
		close(started)
		return instancepoller.NewWorker(st)
	})

	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()

	// Wait for the worker that starts before the instancepoller to
	// start.
	_ = s.singularRecord.nextRunner(c)
	r := s.singularRecord.nextRunner(c)
	r.waitForWorker(c, "charm-revision-updater")

	// Now make sure the instancepoller starts.
	select {
	case <-started:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("instancepoller worker not started as expected")
	}
}

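// startWorkerWait is how long the tests wait before concluding that a
// worker has, correctly, not been started.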
const startWorkerWait = 250 * time.Millisecond

func (s *MachineSuite) TestManageEnvironDoesNotRunFirewallerWhenModeIsNone(c *gc.C) {
	s.PatchValue(&getFirewallMode, func(*api.State) (string, error) {
		return config.FwNone, nil
	})
	started := make(chan struct{})
	s.AgentSuite.PatchValue(&newFirewaller, func(st *apifirewaller.State) (worker.Worker, error) {
		close(started)
		return newDummyWorker(), nil
	})

	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()

	// Wait for the worker that starts before the firewaller to start.
	_ = s.singularRecord.nextRunner(c)
	r := s.singularRecord.nextRunner(c)
	r.waitForWorker(c, "charm-revision-updater")

	// Now make sure the firewaller doesn't start.
	select {
	case <-started:
		c.Fatalf("firewaller worker unexpectedly started")
	case <-time.After(startWorkerWait):
	}
}

func (s *MachineSuite) TestManageEnvironRunsInstancePoller(c *gc.C) {
	s.AgentSuite.PatchValue(&instancepoller.ShortPoll, 500*time.Millisecond)
	usefulVersion := version.Current
	usefulVersion.Series = "quantal" // to match the charm created below
	envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()

	// Add one unit to a service.
	charm := s.AddTestingCharm(c, "dummy")
	svc := s.AddTestingService(c, "test-service", charm)
	units, err := juju.AddUnits(s.State, svc, 1, "")
	c.Assert(err, jc.ErrorIsNil)

	m, instId := s.waitProvisioned(c, units[0])
	insts, err := s.Environ.Instances([]instance.Id{instId})
	c.Assert(err, jc.ErrorIsNil)
	addrs := network.NewAddresses("1.2.3.4")
	dummy.SetInstanceAddresses(insts[0], addrs)
	dummy.SetInstanceStatus(insts[0], "running")

	for a := coretesting.LongAttempt.Start(); a.Next(); {
		if !a.HasNext() {
			c.Logf("final machine addresses: %#v", m.Addresses())
			c.Fatalf("timed out waiting for machine to get address")
		}
		err := m.Refresh()
		c.Assert(err, jc.ErrorIsNil)
		instStatus, err := m.InstanceStatus()
		c.Assert(err, jc.ErrorIsNil)
		if reflect.DeepEqual(m.Addresses(), addrs) && instStatus == "running" {
			break
		}
	}
}

func (s *MachineSuite) TestManageEnvironRunsPeergrouper(c *gc.C) {
	started := make(chan struct{}, 1)
	s.AgentSuite.PatchValue(&peergrouperNew, func(st *state.State) (worker.Worker, error) {
		c.Check(st, gc.NotNil)
		select {
		case started <- struct{}{}:
		default:
		}
		return newDummyWorker(), nil
	})
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()
	select {
	case <-started:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for peergrouper worker to be started")
	}
}

func (s *MachineSuite) TestManageEnvironRunsDbLogPrunerIfFeatureFlagEnabled(c *gc.C) {
	s.SetFeatureFlags(feature.DbLog)

	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()

	runner := s.singularRecord.nextRunner(c)
	runner.waitForWorker(c, "dblogpruner")
}

func (s *MachineSuite) TestManageEnvironDoesntRunDbLogPrunerByDefault(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()

	// Wait for the txnpruner to be started. This is started just after
	// dblogpruner would be started.
	runner := s.singularRecord.nextRunner(c)
	started := set.NewStrings(runner.waitForWorker(c, "txnpruner")...)
	c.Assert(started.Contains("dblogpruner"), jc.IsFalse)
}

func (s *MachineSuite) TestManageEnvironRunsStatusHistoryPruner(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()

	runner := s.singularRecord.nextRunner(c)
	runner.waitForWorker(c, "statushistorypruner")
}

func (s *MachineSuite) TestManageEnvironCallsUseMultipleCPUs(c *gc.C) {
	// A JobManageEnviron agent should call utils.UseMultipleCPUs on startup.
	usefulVersion := version.Current
	usefulVersion.Series = "quantal"
	envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	calledChan := make(chan struct{}, 1)
	s.AgentSuite.PatchValue(&useMultipleCPUs, func() { calledChan <- struct{}{} })
	// Now, start the agent, and observe that a JobManageEnviron agent
	// calls UseMultipleCPUs.
	a := s.newAgent(c, m)
	defer a.Stop()
	go func() {
		c.Check(a.Run(nil), jc.ErrorIsNil)
	}()
	// Wait for configuration to be finished
	<-a.WorkersStarted()
	select {
	case <-calledChan:
	case <-time.After(coretesting.LongWait):
		c.Errorf("we failed to call UseMultipleCPUs()")
	}
	c.Check(a.Stop(), jc.ErrorIsNil)
	// However, an agent that runs just JobHostUnits doesn't call
	// UseMultipleCPUs.
	m2, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a2 := s.newAgent(c, m2)
	defer a2.Stop()
	go func() {
		c.Check(a2.Run(nil), jc.ErrorIsNil)
	}()
	// Wait until all the workers have been started, and then kill everything.
	<-a2.workersStarted
	c.Check(a2.Stop(), jc.ErrorIsNil)
	select {
	case <-calledChan:
		c.Errorf("we should not have called UseMultipleCPUs()")
	case <-time.After(coretesting.ShortWait):
	}
}

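// waitProvisioned waits for the unit's assigned machine to be
// provisioned, returning the machine and its instance id.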
func (s *MachineSuite) waitProvisioned(c *gc.C, unit *state.Unit) (*state.Machine, instance.Id) {
	c.Logf("waiting for unit %q to be provisioned", unit)
	machineId, err := unit.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)
	m, err := s.State.Machine(machineId)
	c.Assert(err, jc.ErrorIsNil)
	w := m.Watch()
	defer w.Stop()
	timeout := time.After(coretesting.LongWait)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for provisioning")
		case _, ok := <-w.Changes():
			c.Assert(ok, jc.IsTrue)
			err := m.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			if instId, err := m.InstanceId(); err == nil {
				c.Logf("unit provisioned with instance %s", instId)
				return m, instId
			} else {
				c.Check(err, jc.Satisfies, errors.IsNotProvisioned)
			}
		}
	}
}

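// testUpgradeRequest bumps the environment's agent version and checks
// that running the agent fails with an UpgradeReadyError describing the
// expected tools change.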
func (s *MachineSuite) testUpgradeRequest(c *gc.C, agent runner, tag string, currentTools *tools.Tools) {
	newVers := version.Current
	newVers.Patch++
	newTools := envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), newVers)[0]
	err := s.State.SetEnvironAgentVersion(newVers.Number)
	c.Assert(err, jc.ErrorIsNil)
	err = runWithTimeout(agent)
	envtesting.CheckUpgraderReadyError(c, err, &upgrader.UpgradeReadyError{
		AgentName: tag,
		OldTools:  currentTools.Version,
		NewTools:  newTools.Version,
		DataDir:   s.DataDir(),
	})
}

func (s *MachineSuite) TestUpgradeRequest(c *gc.C) {
	m, _, currentTools := s.primeAgent(c, version.Current, state.JobManageEnviron, state.JobHostUnits)
	a := s.newAgent(c, m)
	s.testUpgradeRequest(c, a, m.Tag().String(), currentTools)
	c.Assert(a.isAgentUpgradePending(), jc.IsTrue)
}

func (s *MachineSuite) TestNoUpgradeRequired(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron, state.JobHostUnits)
	a := s.newAgent(c, m)
	done := make(chan error)
	go func() { done <- a.Run(nil) }()
	select {
	case <-a.initialAgentUpgradeCheckComplete:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout waiting for upgrade check")
	}
	defer a.Stop() // in case of failure
	s.waitStopped(c, state.JobManageEnviron, a, done)
	c.Assert(a.isAgentUpgradePending(), jc.IsFalse)
}

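// fastDialOpts retries API connections promptly (within an overall
// LongWait timeout) so that tests don't sit out the default retry delay.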
var fastDialOpts = api.DialOpts{
	Timeout:    coretesting.LongWait,
	RetryDelay: coretesting.ShortWait,
}

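// waitStopped stops the agent and waits for its Run goroutine to
// return, tolerating shutdown errors from state managers.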
func (s *MachineSuite) waitStopped(c *gc.C, job state.MachineJob, a *MachineAgent, done chan error) {
	err := a.Stop()
	if job == state.JobManageEnviron {
		// When shutting down, the API server can be shut down before
		// the other workers that connect to it, in which case they
		// receive an error and die, causing Stop to return an error.
		// It's not easy to control the actual error received in this
		// circumstance, so we just log it rather than asserting that
		// it is not nil.
		if err != nil {
			c.Logf("error shutting down state manager: %v", err)
		}
	} else {
		c.Assert(err, jc.ErrorIsNil)
	}

	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(5 * time.Second):
		c.Fatalf("timed out waiting for agent to terminate")
	}
}

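// assertJobWithAPI runs test with the agent's configuration and the API
// connection opened by an agent started with the given job.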
func (s *MachineSuite) assertJobWithAPI(
	c *gc.C,
	job state.MachineJob,
	test func(agent.Config, *api.State),
) {
	s.assertAgentOpensState(c, &reportOpenedAPI, job, func(cfg agent.Config, st interface{}) {
		test(cfg, st.(*api.State))
	})
}

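// assertJobWithState is like assertJobWithAPI, but hands the test a
// direct *state.State connection; the job must need state access.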
func (s *MachineSuite) assertJobWithState(
	c *gc.C,
	job state.MachineJob,
	test func(agent.Config, *state.State),
) {
	paramsJob := job.ToParams()
	if !paramsJob.NeedsState() {
		c.Fatalf("%v does not use state", paramsJob)
	}
	s.assertAgentOpensState(c, &reportOpenedState, job, func(cfg agent.Config, st interface{}) {
		test(cfg, st.(*state.State))
	})
}

// assertAgentOpensState asserts that a machine agent started with the
// given job will call the function pointed to by reportOpened. The
// agent's configuration and the value passed to reportOpened are then
// passed to the test function for further checking.
func (s *MachineSuite) assertAgentOpensState(
	c *gc.C,
	reportOpened *func(io.Closer),
	job state.MachineJob,
	test func(agent.Config, interface{}),
) {
	stm, conf, _ := s.primeAgent(c, version.Current, job)
	a := s.newAgent(c, stm)
	defer a.Stop()
	logger.Debugf("new agent %#v", a)

	// All state jobs currently also run an APIWorker, so no
	// need to check for that here, like in assertJobWithState.

	agentAPIs := make(chan io.Closer, 1)
	s.AgentSuite.PatchValue(reportOpened, func(st io.Closer) {
		select {
		case agentAPIs <- st:
		default:
		}
	})

	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()

	select {
	case agentAPI := <-agentAPIs:
		c.Assert(agentAPI, gc.NotNil)
		test(conf, agentAPI)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("API not opened")
	}

	s.waitStopped(c, job, a, done)
}

func (s *MachineSuite) TestManageEnvironServesAPI(c *gc.C) {
	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
		st, err := api.Open(conf.APIInfo(), fastDialOpts)
		c.Assert(err, jc.ErrorIsNil)
		defer st.Close()
		m, err := st.Machiner().Machine(conf.Tag().(names.MachineTag))
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(m.Life(), gc.Equals, params.Alive)
	})
}

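// assertAgentSetsToolsVersion primes an agent whose recorded tools are
// one minor version ahead of the running version, then waits for the
// running agent to record version.Current in state.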
func (s *MachineSuite) assertAgentSetsToolsVersion(c *gc.C, job state.MachineJob) {
	vers := version.Current
	vers.Minor = version.Current.Minor + 1
	m, _, _ := s.primeAgent(c, vers, job)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	timeout := time.After(coretesting.LongWait)
	for done := false; !done; {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for agent version to be set")
		case <-time.After(coretesting.ShortWait):
			c.Log("Refreshing")
			err := m.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			c.Log("Fetching agent tools")
			agentTools, err := m.AgentTools()
			c.Assert(err, jc.ErrorIsNil)
			c.Logf("(%v vs. %v)", agentTools.Version, version.Current)
			if agentTools.Version.Minor != version.Current.Minor {
				continue
			}
			c.Assert(agentTools.Version, gc.DeepEquals, version.Current)
			done = true
		}
	}
}

func (s *MachineSuite) TestAgentSetsToolsVersionManageEnviron(c *gc.C) {
	s.assertAgentSetsToolsVersion(c, state.JobManageEnviron)
}

func (s *MachineSuite) TestAgentSetsToolsVersionHostUnits(c *gc.C) {
	s.assertAgentSetsToolsVersion(c, state.JobHostUnits)
}

func (s *MachineSuite) TestManageEnvironRunsCleaner(c *gc.C) {
	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
		// Create a service and unit, and destroy the service.
		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
		unit, err := service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		err = service.Destroy()
		c.Assert(err, jc.ErrorIsNil)

		// Check the unit was not yet removed.
		err = unit.Refresh()
		c.Assert(err, jc.ErrorIsNil)
		w := unit.Watch()
		defer w.Stop()

		// Trigger a sync on the state used by the agent, and wait
		// for the unit to be removed.
		agentState.StartSync()
		timeout := time.After(coretesting.LongWait)
		for done := false; !done; {
			select {
			case <-timeout:
				c.Fatalf("unit not cleaned up")
			case <-time.After(coretesting.ShortWait):
				s.State.StartSync()
			case <-w.Changes():
				err := unit.Refresh()
				if errors.IsNotFound(err) {
					done = true
				} else {
					c.Assert(err, jc.ErrorIsNil)
				}
			}
		}
	})
}

func (s *MachineSuite) TestJobManageEnvironRunsMinUnitsWorker(c *gc.C) {
	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
		// Ensure that the MinUnits worker is alive by doing a simple check
		// that it responds to state changes: add a service, set its minimum
		// number of units to one, wait for the worker to add the missing unit.
		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
		err := service.SetMinUnits(1)
		c.Assert(err, jc.ErrorIsNil)
		w := service.Watch()
		defer w.Stop()

		// Trigger a sync on the state used by the agent, and wait for the unit
		// to be created.
		agentState.StartSync()
		timeout := time.After(coretesting.LongWait)
		for {
			select {
			case <-timeout:
				c.Fatalf("unit not created")
			case <-time.After(coretesting.ShortWait):
				s.State.StartSync()
			case <-w.Changes():
				units, err := service.AllUnits()
				c.Assert(err, jc.ErrorIsNil)
				if len(units) == 1 {
					return
				}
			}
		}
	})
}

func (s *MachineSuite) TestMachineAgentRunsAuthorisedKeysWorker(c *gc.C) {
	// TODO(bogdanteleaga): Fix once we get authentication worker up on windows
	if runtime.GOOS == "windows" {
		c.Skip("bug 1403084: authentication worker not yet implemented on windows")
	}
	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()

	// Update the keys in the environment.
	sshKey := sshtesting.ValidKeyOne.Key + " user@host"
	err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": sshKey}, nil, nil)
	c.Assert(err, jc.ErrorIsNil)

	// Wait for ssh keys file to be updated.
	s.State.StartSync()
	timeout := time.After(coretesting.LongWait)
	sshKeyWithCommentPrefix := sshtesting.ValidKeyOne.Key + " Juju:user@host"
	for {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for authorised ssh keys to change")
		case <-time.After(coretesting.ShortWait):
			keys, err := ssh.ListKeys(authenticationworker.SSHUser, ssh.FullKeys)
			c.Assert(err, jc.ErrorIsNil)
			keysStr := strings.Join(keys, "\n")
			if sshKeyWithCommentPrefix != keysStr {
				continue
			}
			return
		}
	}
}

// opRecvTimeout waits for any of the given kinds of operation to
// be received from opc, and times out if not.
func opRecvTimeout(c *gc.C, st *state.State, opc <-chan dummy.Operation, kinds ...dummy.Operation) dummy.Operation {
	st.StartSync()
	for {
		select {
		case op := <-opc:
			for _, k := range kinds {
				if reflect.TypeOf(op) == reflect.TypeOf(k) {
					return op
				}
			}
			c.Logf("discarding unknown event %#v", op)
		case <-time.After(15 * time.Second):
			c.Fatalf("timed out waiting for operation")
		}
	}
}

func (s *MachineSuite) TestOpenStateFailsForJobHostUnitsButOpenAPIWorks(c *gc.C) {
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	s.RunTestOpenAPIState(c, m, a, initialMachinePassword)
	s.assertJobWithAPI(c, state.JobHostUnits, func(conf agent.Config, st *api.State) {
		s.AssertCannotOpenState(c, conf.Tag(), conf.DataDir())
	})
}

func (s *MachineSuite) TestOpenStateWorksForJobManageEnviron(c *gc.C) {
	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st *api.State) {
		s.AssertCanOpenState(c, conf.Tag(), conf.DataDir())
	})
}

func (s *MachineSuite) TestMachineAgentSymlinkJujuRun(c *gc.C) {
	_, err := os.Stat(JujuRun)
	c.Assert(err, jc.Satisfies, os.IsNotExist)
	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st *api.State) {
		// juju-run should have been created
		_, err := os.Stat(JujuRun)
		c.Assert(err, jc.ErrorIsNil)
	})
}

func (s *MachineSuite) TestMachineAgentSymlinkJujuRunExists(c *gc.C) {
	if runtime.GOOS == "windows" {
		// On windows we cannot create a symlink to a nonexistent file,
		// nor create a file, point a symlink at it, and then remove it.
		c.Skip("Cannot test this on windows")
	}
	err := symlink.New("/nowhere/special", JujuRun)
	c.Assert(err, jc.ErrorIsNil)
	_, err = os.Stat(JujuRun)
	c.Assert(err, jc.Satisfies, os.IsNotExist)
	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st *api.State) {
		// juju-run should have been recreated
		_, err := os.Stat(JujuRun)
		c.Assert(err, jc.ErrorIsNil)
		link, err := symlink.Read(JujuRun)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(link, gc.Not(gc.Equals), "/nowhere/special")
	})
}

func (s *MachineSuite) TestProxyUpdater(c *gc.C) {
	s.assertProxyUpdater(c, true)
	s.assertProxyUpdater(c, false)
}

func (s *MachineSuite) assertProxyUpdater(c *gc.C, expectWriteSystemFiles bool) {
	// Patch out the func that decides whether we should write system files.
	var gotConf agent.Config
	s.AgentSuite.PatchValue(&shouldWriteProxyFiles, func(conf agent.Config) bool {
		gotConf = conf
		return expectWriteSystemFiles
	})

	// Make sure there are some proxy settings to write.
	expectSettings := proxy.Settings{
		Http:  "http proxy",
		Https: "https proxy",
		Ftp:   "ftp proxy",
	}
	updateAttrs := config.ProxyConfigMap(expectSettings)
	err := s.State.UpdateEnvironConfig(updateAttrs, nil, nil)
	c.Assert(err, jc.ErrorIsNil)

	// Patch out the actual worker func.
	started := make(chan struct{})
	mockNew := func(api *apienvironment.Facade, writeSystemFiles bool) worker.Worker {
		// Direct check of the behaviour flag.
		c.Check(writeSystemFiles, gc.Equals, expectWriteSystemFiles)
		// Indirect check that we get a functional API.
		conf, err := api.EnvironConfig()
		if c.Check(err, jc.ErrorIsNil) {
			actualSettings := conf.ProxySettings()
			c.Check(actualSettings, jc.DeepEquals, expectSettings)
		}
		return worker.NewSimpleWorker(func(_ <-chan struct{}) error {
			close(started)
			return nil
		})
	}
	s.AgentSuite.PatchValue(&proxyupdater.New, mockNew)

	s.primeAgent(c, version.Current, state.JobHostUnits)
	s.assertJobWithAPI(c, state.JobHostUnits, func(conf agent.Config, st *api.State) {
		for {
			select {
			case <-time.After(coretesting.LongWait):
				c.Fatalf("timeout while waiting for proxy updater to start")
			case <-started:
				c.Assert(gotConf, jc.DeepEquals, conf)
				return
			}
		}
	})
}

func (s *MachineSuite) TestMachineAgentUninstall(c *gc.C) {
	m, ac, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	err := m.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	a := s.newAgent(c, m)
	err = runWithTimeout(a)
	c.Assert(err, jc.ErrorIsNil)
	// juju-run should have been removed on termination
	_, err = os.Stat(JujuRun)
	c.Assert(err, jc.Satisfies, os.IsNotExist)
	// data-dir should have been removed on termination
	_, err = os.Stat(ac.DataDir())
	c.Assert(err, jc.Satisfies, os.IsNotExist)
}

func (s *MachineSuite) TestMachineAgentRsyslogManageEnviron(c *gc.C) {
	s.testMachineAgentRsyslogConfigWorker(c, state.JobManageEnviron, rsyslog.RsyslogModeAccumulate)
}

func (s *MachineSuite) TestMachineAgentRsyslogHostUnits(c *gc.C) {
	s.testMachineAgentRsyslogConfigWorker(c, state.JobHostUnits, rsyslog.RsyslogModeForwarding)
}

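// testMachineAgentRsyslogConfigWorker checks that an agent run with the
// given job creates the rsyslog config worker in the expected mode.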
  1243  func (s *MachineSuite) testMachineAgentRsyslogConfigWorker(c *gc.C, job state.MachineJob, expectedMode rsyslog.RsyslogMode) {
  1244  	created := make(chan rsyslog.RsyslogMode, 1)
  1245  	s.AgentSuite.PatchValue(&cmdutil.NewRsyslogConfigWorker, func(_ *apirsyslog.State, _ agent.Config, mode rsyslog.RsyslogMode) (worker.Worker, error) {
  1246  		created <- mode
  1247  		return newDummyWorker(), nil
  1248  	})
  1249  	s.assertJobWithAPI(c, job, func(conf agent.Config, st *api.State) {
  1250  		select {
  1251  		case <-time.After(coretesting.LongWait):
  1252  			c.Fatalf("timeout while waiting for rsyslog worker to be created")
  1253  		case mode := <-created:
  1254  			c.Assert(mode, gc.Equals, expectedMode)
  1255  		}
  1256  	})
  1257  }
  1258  
  1259  func (s *MachineSuite) TestMachineAgentRunsAPIAddressUpdaterWorker(c *gc.C) {
  1260  	// Start the machine agent.
  1261  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1262  	a := s.newAgent(c, m)
  1263  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1264  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1265  
  1266  	// Update the API addresses.
  1267  	updatedServers := [][]network.HostPort{
  1268  		network.NewHostPorts(1234, "localhost"),
  1269  	}
  1270  	err := s.BackingState.SetAPIHostPorts(updatedServers)
  1271  	c.Assert(err, jc.ErrorIsNil)
  1272  
  1273  	// Wait for config to be updated.
  1274  	s.BackingState.StartSync()
  1275  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
  1276  		addrs, err := a.CurrentConfig().APIAddresses()
  1277  		c.Assert(err, jc.ErrorIsNil)
  1278  		if reflect.DeepEqual(addrs, []string{"localhost:1234"}) {
  1279  			return
  1280  		}
  1281  	}
  1282  	c.Fatalf("timeout while waiting for agent config to change")
  1283  }
  1284  
  1285  func (s *MachineSuite) TestMachineAgentRunsDiskManagerWorker(c *gc.C) {
  1286  	// Patch out the worker func before starting the agent.
  1287  	started := make(chan struct{})
  1288  	newWorker := func(diskmanager.ListBlockDevicesFunc, diskmanager.BlockDeviceSetter) worker.Worker {
  1289  		close(started)
  1290  		return worker.NewNoOpWorker()
  1291  	}
  1292  	s.PatchValue(&newDiskManager, newWorker)
  1293  
  1294  	// Start the machine agent.
  1295  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1296  	a := s.newAgent(c, m)
  1297  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1298  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1299  
  1300  	// Wait for worker to be started.
  1301  	select {
  1302  	case <-started:
  1303  	case <-time.After(coretesting.LongWait):
  1304  		c.Fatalf("timeout while waiting for diskmanager worker to start")
  1305  	}
  1306  }
  1307  
  1308  func (s *MachineSuite) TestDiskManagerWorkerUpdatesState(c *gc.C) {
  1309  	expected := []storage.BlockDevice{{DeviceName: "whatever"}}
  1310  	s.PatchValue(&diskmanager.DefaultListBlockDevices, func() ([]storage.BlockDevice, error) {
  1311  		return expected, nil
  1312  	})
  1313  
  1314  	// Start the machine agent.
  1315  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1316  	a := s.newAgent(c, m)
  1317  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1318  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1319  
  1320  	// Wait for state to be updated.
  1321  	s.BackingState.StartSync()
  1322  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
  1323  		devices, err := s.BackingState.BlockDevices(m.MachineTag())
  1324  		c.Assert(err, jc.ErrorIsNil)
  1325  		if len(devices) > 0 {
  1326  			c.Assert(devices, gc.HasLen, 1)
  1327  			c.Assert(devices[0].DeviceName, gc.Equals, expected[0].DeviceName)
  1328  			return
  1329  		}
  1330  	}
  1331  	c.Fatalf("timeout while waiting for block devices to be recorded")
  1332  }
  1333  
  1334  func (s *MachineSuite) TestMachineAgentRunsMachineStorageWorker(c *gc.C) {
  1335  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1336  
  1337  	started := make(chan struct{})
  1338  	newWorker := func(
  1339  		scope names.Tag,
  1340  		storageDir string,
  1341  		_ storageprovisioner.VolumeAccessor,
  1342  		_ storageprovisioner.FilesystemAccessor,
  1343  		_ storageprovisioner.LifecycleManager,
  1344  		_ storageprovisioner.EnvironAccessor,
  1345  		_ storageprovisioner.MachineAccessor,
  1346  	) worker.Worker {
  1347  		c.Check(scope, gc.Equals, m.Tag())
  1348  		// storageDir is not empty for machine scoped storage provisioners
  1349  		c.Assert(storageDir, gc.Not(gc.Equals), "")
  1350  		close(started)
  1351  		return worker.NewNoOpWorker()
  1352  	}
  1353  	s.PatchValue(&newStorageWorker, newWorker)
  1354  
  1355  	// Start the machine agent.
  1356  	a := s.newAgent(c, m)
  1357  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1358  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1359  
  1360  	// Wait for worker to be started.
  1361  	select {
  1362  	case <-started:
  1363  	case <-time.After(coretesting.LongWait):
  1364  		c.Fatalf("timeout while waiting for storage worker to start")
  1365  	}
  1366  }
  1367  
  1368  func (s *MachineSuite) TestMachineAgentRunsEnvironStorageWorker(c *gc.C) {
  1369  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1370  
  1371  	var numWorkers, machineWorkers, environWorkers uint32
  1372  	started := make(chan struct{})
  1373  	newWorker := func(
  1374  		scope names.Tag,
  1375  		storageDir string,
  1376  		_ storageprovisioner.VolumeAccessor,
  1377  		_ storageprovisioner.FilesystemAccessor,
  1378  		_ storageprovisioner.LifecycleManager,
  1379  		_ storageprovisioner.EnvironAccessor,
  1380  		_ storageprovisioner.MachineAccessor,
  1381  	) worker.Worker {
  1382  		// storageDir is empty for environ storage provisioners
  1383  		if storageDir == "" {
  1384  			c.Check(scope, gc.Equals, s.State.EnvironTag())
  1385  			c.Check(atomic.AddUint32(&environWorkers, 1), gc.Equals, uint32(1))
  1386  			atomic.AddUint32(&numWorkers, 1)
  1387  		}
  1388  		if storageDir != "" {
  1389  			c.Check(scope, gc.Equals, m.Tag())
  1390  			c.Check(atomic.AddUint32(&machineWorkers, 1), gc.Equals, uint32(1))
  1391  			atomic.AddUint32(&numWorkers, 1)
  1392  		}
  1393  		if atomic.LoadUint32(&environWorkers) == 1 && atomic.LoadUint32(&machineWorkers) == 1 {
  1394  			close(started)
  1395  		}
  1396  		return worker.NewNoOpWorker()
  1397  	}
  1398  	s.PatchValue(&newStorageWorker, newWorker)
  1399  
  1400  	// Start the machine agent.
  1401  	a := s.newAgent(c, m)
  1402  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1403  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1404  
  1405  	// Wait for worker to be started.
  1406  	select {
  1407  	case <-started:
  1408  		c.Assert(atomic.LoadUint32(&numWorkers), gc.Equals, uint32(2))
  1409  	case <-time.After(coretesting.LongWait):
  1410  		c.Fatalf("timeout while waiting for storage worker to start")
  1411  	}
  1412  }
  1413  
  1414  func (s *MachineSuite) TestMachineAgentRunsCertificateUpdateWorkerForStateServer(c *gc.C) {
  1415  	started := make(chan struct{})
  1416  	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
  1417  		certupdater.StateServingInfoSetter, chan params.StateServingInfo,
  1418  	) worker.Worker {
  1419  		close(started)
  1420  		return worker.NewNoOpWorker()
  1421  	}
  1422  	s.PatchValue(&newCertificateUpdater, newUpdater)
  1423  
  1424  	// Start the machine agent.
  1425  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1426  	a := s.newAgent(c, m)
  1427  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1428  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1429  
  1430  	// Wait for worker to be started.
  1431  	select {
  1432  	case <-started:
  1433  	case <-time.After(coretesting.LongWait):
  1434  		c.Fatalf("timeout while waiting for certificate update worker to start")
  1435  	}
  1436  }
  1437  
  1438  func (s *MachineSuite) TestMachineAgentDoesNotRunCertificateUpdateWorkerForNonStateServer(c *gc.C) {
  1439  	started := make(chan struct{})
  1440  	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
  1441  		certupdater.StateServingInfoSetter, chan params.StateServingInfo,
  1442  	) worker.Worker {
  1443  		close(started)
  1444  		return worker.NewNoOpWorker()
  1445  	}
  1446  	s.PatchValue(&newCertificateUpdater, newUpdater)
  1447  
  1448  	// Start the machine agent.
  1449  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1450  	a := s.newAgent(c, m)
  1451  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1452  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1453  
  1454  	// Ensure the worker is not started.
  1455  	select {
  1456  	case <-started:
  1457  		c.Fatalf("certificate update worker unexpectedly started")
  1458  	case <-time.After(coretesting.ShortWait):
  1459  	}
  1460  }
  1461  
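        // TestCertificateUpdateWorkerUpdatesCertificate polls the agent's
        // state serving info until the server certificate's SAN IPs are
        // updated to the expected address (0.1.2.3).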
  1462  func (s *MachineSuite) TestCertificateUpdateWorkerUpdatesCertificate(c *gc.C) {
  1463  	// Set up the machine agent.
  1464  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1465  	a := s.newAgent(c, m)
  1466  	c.Assert(a.ReadConfig(names.NewMachineTag(m.Id()).String()), jc.ErrorIsNil)
  1467  
  1468  	// Set up check that certificate has been updated.
  1469  	updated := make(chan struct{})
  1470  	go func() {
  1471  		for {
  1472  			stateInfo, _ := a.CurrentConfig().StateServingInfo()
  1473  			srvCert, err := cert.ParseCert(stateInfo.Cert)
  1474  			c.Assert(err, jc.ErrorIsNil)
  1475  			sanIPs := make([]string, len(srvCert.IPAddresses))
  1476  			for i, ip := range srvCert.IPAddresses {
  1477  				sanIPs[i] = ip.String()
  1478  			}
  1479  			if len(sanIPs) == 1 && sanIPs[0] == "0.1.2.3" {
  1480  				close(updated)
  1481  				break
  1482  			}
  1483  			time.Sleep(10 * time.Millisecond)
  1484  		}
  1485  	}()
  1486  
  1487  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1488  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1489  	// Wait for certificate to be updated.
  1490  	select {
  1491  	case <-updated:
  1492  	case <-time.After(coretesting.LongWait):
  1493  		c.Fatalf("timeout while waiting for certificate to be updated")
  1494  	}
  1495  }
  1496  
  1497  func (s *MachineSuite) TestCertificateDNSUpdated(c *gc.C) {
  1498  	// Disable the certificate updater worker so it doesn't update the certificate.
  1499  	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
  1500  		certupdater.StateServingInfoSetter, chan params.StateServingInfo,
  1501  	) worker.Worker {
  1502  		return worker.NewNoOpWorker()
  1503  	}
  1504  	s.PatchValue(&newCertificateUpdater, newUpdater)
  1505  
  1506  	// Set up the machine agent.
  1507  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1508  	a := s.newAgent(c, m)
  1509  
  1510  	// Set up check that certificate has been updated when the agent starts.
  1511  	updated := make(chan struct{})
  1512  	expectedDnsNames := set.NewStrings("local", "juju-apiserver", "juju-mongodb")
  1513  	go func() {
  1514  		for {
  1515  			stateInfo, _ := a.CurrentConfig().StateServingInfo()
  1516  			srvCert, err := cert.ParseCert(stateInfo.Cert)
  1517  			c.Assert(err, jc.ErrorIsNil)
  1518  			certDnsNames := set.NewStrings(srvCert.DNSNames...)
  1519  			if !expectedDnsNames.Difference(certDnsNames).IsEmpty() {
        				time.Sleep(10 * time.Millisecond)
  1520  				continue
  1521  			}
  1522  			pemContent, err := ioutil.ReadFile(filepath.Join(s.DataDir(), "server.pem"))
  1523  			c.Assert(err, jc.ErrorIsNil)
  1524  			if string(pemContent) == stateInfo.Cert+"\n"+stateInfo.PrivateKey {
  1525  				close(updated)
  1526  				break
  1527  			}
  1528  			time.Sleep(10 * time.Millisecond)
  1529  		}
  1530  	}()
  1531  
  1532  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1533  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1534  	// Wait for certificate to be updated.
  1535  	select {
  1536  	case <-updated:
  1537  	case <-time.After(coretesting.LongWait):
  1538  		c.Fatalf("timeout while waiting for certificate to be updated")
  1539  	}
  1540  }
  1541  
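        // TestMachineAgentNetworkerMode checks that the networker starts in
        // intrusive mode only when network management is enabled and the
        // machine has the JobManageNetworking job.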
  1542  func (s *MachineSuite) TestMachineAgentNetworkerMode(c *gc.C) {
  1543  	tests := []struct {
  1544  		about          string
  1545  		managedNetwork bool
  1546  		jobs           []state.MachineJob
  1547  		intrusiveMode  bool
  1548  	}{{
  1549  		about:          "network management enabled, network management job set",
  1550  		managedNetwork: true,
  1551  		jobs:           []state.MachineJob{state.JobHostUnits, state.JobManageNetworking},
  1552  		intrusiveMode:  true,
  1553  	}, {
  1554  		about:          "network management disabled, network management job set",
  1555  		managedNetwork: false,
  1556  		jobs:           []state.MachineJob{state.JobHostUnits, state.JobManageNetworking},
  1557  		intrusiveMode:  false,
  1558  	}, {
  1559  		about:          "network management enabled, network management job not set",
  1560  		managedNetwork: true,
  1561  		jobs:           []state.MachineJob{state.JobHostUnits},
  1562  		intrusiveMode:  false,
  1563  	}, {
  1564  		about:          "network management disabled, network management job not set",
  1565  		managedNetwork: false,
  1566  		jobs:           []state.MachineJob{state.JobHostUnits},
  1567  		intrusiveMode:  false,
  1568  	}}
  1569  	// Perform tests.
  1570  	for i, test := range tests {
  1571  		c.Logf("test #%d: %s", i, test.about)
  1572  
  1573  		modeCh := make(chan bool, 1)
  1574  		s.AgentSuite.PatchValue(&newNetworker, func(
  1575  			st apinetworker.State,
  1576  			conf agent.Config,
  1577  			intrusiveMode bool,
  1578  			configBaseDir string,
  1579  		) (*networker.Networker, error) {
  1580  			select {
  1581  			case modeCh <- intrusiveMode:
  1582  			default:
  1583  			}
  1584  			return networker.NewNetworker(st, conf, intrusiveMode, configBaseDir)
  1585  		})
  1586  
  1587  		attrs := coretesting.Attrs{"disable-network-management": !test.managedNetwork}
  1588  		err := s.BackingState.UpdateEnvironConfig(attrs, nil, nil)
  1589  		c.Assert(err, jc.ErrorIsNil)
  1590  
  1591  		m, _, _ := s.primeAgent(c, version.Current, test.jobs...)
  1592  		a := s.newAgent(c, m)
  1593  		defer a.Stop()
  1594  		doneCh := make(chan error)
  1595  		go func() {
  1596  			doneCh <- a.Run(nil)
  1597  		}()
  1598  
  1599  		select {
  1600  		case intrusiveMode := <-modeCh:
  1601  			if intrusiveMode != test.intrusiveMode {
  1602  				c.Fatalf("expected networker intrusive mode = %v, got mode = %v", test.intrusiveMode, intrusiveMode)
  1603  			}
  1604  		case <-time.After(coretesting.LongWait):
  1605  			c.Fatalf("timed out waiting for the networker to start")
  1606  		}
  1607  		s.waitStopped(c, state.JobManageNetworking, a, doneCh)
  1608  	}
  1609  }
  1610  
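        // TestMachineAgentUpgradeMongo simulates an agent last upgraded to
        // version 1.18.0 whose mongo admin user has been removed, and checks
        // that starting the agent ensures the admin user and initiates the
        // replica set.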
  1611  func (s *MachineSuite) TestMachineAgentUpgradeMongo(c *gc.C) {
  1612  	m, agentConfig, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1613  	agentConfig.SetUpgradedToVersion(version.MustParse("1.18.0"))
  1614  	err := agentConfig.Write()
  1615  	c.Assert(err, jc.ErrorIsNil)
  1616  	err = s.State.MongoSession().DB("admin").RemoveUser(m.Tag().String())
  1617  	c.Assert(err, jc.ErrorIsNil)
  1618  
  1619  	s.fakeEnsureMongo.ServiceInstalled = true
  1620  	s.fakeEnsureMongo.ReplicasetInitiated = false
  1621  
  1622  	s.AgentSuite.PatchValue(&ensureMongoAdminUser, func(p mongo.EnsureAdminUserParams) (bool, error) {
  1623  		err := s.State.MongoSession().DB("admin").AddUser(p.User, p.Password, false)
  1624  		c.Assert(err, jc.ErrorIsNil)
  1625  		return true, nil
  1626  	})
  1627  
  1628  	stateOpened := make(chan interface{}, 1)
  1629  	s.AgentSuite.PatchValue(&reportOpenedState, func(st io.Closer) {
  1630  		select {
  1631  		case stateOpened <- st:
  1632  		default:
  1633  		}
  1634  	})
  1635  
  1636  	// Start the machine agent, and wait for state to be opened.
  1637  	a := s.newAgent(c, m)
  1638  	done := make(chan error)
  1639  	go func() { done <- a.Run(nil) }()
  1640  	defer a.Stop() // in case of failure
  1641  	select {
  1642  	case st := <-stateOpened:
  1643  		c.Assert(st, gc.NotNil)
  1644  	case <-time.After(coretesting.LongWait):
  1645  		c.Fatalf("state not opened")
  1646  	}
  1647  	s.waitStopped(c, state.JobManageEnviron, a, done)
  1648  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  1649  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 1)
  1650  }
  1651  
  1652  func (s *MachineSuite) TestMachineAgentSetsPrepareRestore(c *gc.C) {
  1653  	// Start the machine agent.
  1654  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1655  	a := s.newAgent(c, m)
  1656  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1657  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1658  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1659  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1660  	err := a.PrepareRestore()
  1661  	c.Assert(err, jc.ErrorIsNil)
  1662  	c.Assert(a.IsRestorePreparing(), jc.IsTrue)
  1663  	c.Assert(a.IsRestoreRunning(), jc.IsFalse)
  1664  	err = a.PrepareRestore()
  1665  	c.Assert(err, gc.ErrorMatches, "already in restore mode")
  1666  }
  1667  
  1668  func (s *MachineSuite) TestMachineAgentSetsRestoreInProgress(c *gc.C) {
  1669  	// Start the machine agent.
  1670  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1671  	a := s.newAgent(c, m)
  1672  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1673  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1674  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1675  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1676  	err := a.PrepareRestore()
  1677  	c.Assert(err, jc.ErrorIsNil)
  1678  	c.Assert(a.IsRestorePreparing(), jc.IsTrue)
  1679  	err = a.BeginRestore()
  1680  	c.Assert(err, jc.ErrorIsNil)
  1681  	c.Assert(a.IsRestoreRunning(), jc.IsTrue)
  1682  	err = a.BeginRestore()
  1683  	c.Assert(err, gc.ErrorMatches, "already restoring")
  1684  }
  1685  
  1686  func (s *MachineSuite) TestMachineAgentRestoreRequiresPrepare(c *gc.C) {
  1687  	// Start the machine agent.
  1688  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1689  	a := s.newAgent(c, m)
  1690  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1691  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1692  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1693  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1694  	err := a.BeginRestore()
  1695  	c.Assert(err, gc.ErrorMatches, "not in restore mode, cannot begin restoration")
  1696  	c.Assert(a.IsRestoreRunning(), jc.IsFalse)
  1697  }

  1698  func (s *MachineSuite) TestMachineAgentAPIWorkerErrorClosesAPI(c *gc.C) {
  1699  	// Start the machine agent.
  1700  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1701  	a := s.newAgent(c, m)
  1702  	a.apiStateUpgrader = &machineAgentUpgrader{}
  1703  
  1704  	closedAPI := make(chan io.Closer, 1)
  1705  	s.AgentSuite.PatchValue(&reportClosedMachineAPI, func(st io.Closer) {
  1706  		select {
  1707  		case closedAPI <- st:
  1708  			close(closedAPI)
  1709  		default:
  1710  		}
  1711  	})
  1712  
  1713  	w, err := a.APIWorker()
  1714  
  1715  	select {
  1716  	case closed := <-closedAPI:
  1717  		c.Assert(closed, gc.NotNil)
  1718  	case <-time.After(coretesting.LongWait):
  1719  		c.Fatalf("API not opened")
  1720  	}
  1721  
  1722  	c.Assert(w, gc.IsNil)
  1723  	c.Assert(err, gc.ErrorMatches, "cannot set machine agent version: test failure")
  1724  	c.Assert(a.isAgentUpgradePending(), jc.IsTrue)
  1725  }
  1726  
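        // machineAgentUpgrader is a stub upgrader whose SetVersion always
        // fails, used to force APIWorker down its error path.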
  1727  type machineAgentUpgrader struct{}
  1728  
  1729  func (m *machineAgentUpgrader) SetVersion(s string, v version.Binary) error {
  1730  	return errors.New("test failure")
  1731  }
  1732  
  1733  func (s *MachineSuite) TestNewEnvironmentStartsNewWorkers(c *gc.C) {
  1734  	_, closer := s.setUpNewEnvironment(c)
  1735  	defer closer()
  1736  	expectedWorkers, closer := s.setUpAgent(c)
  1737  	defer closer()
  1738  
  1739  	r1 := s.singularRecord.nextRunner(c)
  1740  	workers := r1.waitForWorker(c, "firewaller")
  1741  	c.Assert(workers, jc.SameContents, expectedWorkers)
  1742  }
  1743  
  1744  func (s *MachineSuite) TestNewStorageWorkerIsScopedToNewEnviron(c *gc.C) {
  1745  	st, closer := s.setUpNewEnvironment(c)
  1746  	defer closer()
  1747  
  1748  	// Check that newStorageWorker is called with a scope matching the
  1749  	// new environment's tag.
  1750  	started := make(chan struct{})
  1751  	newWorker := func(
  1752  		scope names.Tag,
  1753  		storageDir string,
  1754  		_ storageprovisioner.VolumeAccessor,
  1755  		_ storageprovisioner.FilesystemAccessor,
  1756  		_ storageprovisioner.LifecycleManager,
  1757  		_ storageprovisioner.EnvironAccessor,
  1758  		_ storageprovisioner.MachineAccessor,
  1759  	) worker.Worker {
  1760  		// storageDir is empty for environ storage provisioners
  1761  		if storageDir == "" {
  1762  			// If this is the worker for the new environment,
  1763  			// close the channel.
  1764  			if scope == st.EnvironTag() {
  1765  				close(started)
  1766  			}
  1767  		}
  1768  		return worker.NewNoOpWorker()
  1769  	}
  1770  	s.PatchValue(&newStorageWorker, newWorker)
  1771  
  1772  	_, closer = s.setUpAgent(c)
  1773  	defer closer()
  1774  
  1775  	// Wait for newStorageWorker to be started.
  1776  	select {
  1777  	case <-started:
  1778  	case <-time.After(coretesting.LongWait):
  1779  		c.Fatalf("timeout while waiting for storage worker to start")
  1780  	}
  1781  }
  1782  
  1783  func (s *MachineSuite) setUpNewEnvironment(c *gc.C) (newSt *state.State, closer func()) {
  1784  	// Create a new environment, tests can now watch if workers start for it.
  1785  	newSt = s.Factory.MakeEnvironment(c, &factory.EnvParams{
  1786  		ConfigAttrs: map[string]interface{}{
  1787  			"state-server": false,
  1788  		},
  1789  		Prepare: true,
  1790  	})
  1791  	return newSt, func() {
  1792  		newSt.Close()
  1793  	}
  1794  }
  1795  
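        // setUpAgent starts a state server machine agent and waits for the
        // workers of the initial environment to start, returning the expected
        // worker names and a closer that stops the agent.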
  1796  func (s *MachineSuite) setUpAgent(c *gc.C) (expectedWorkers []string, closer func()) {
  1797  	expectedWorkers = make([]string, 0, len(perEnvSingularWorkers)+1)
  1798  	for _, w := range perEnvSingularWorkers {
  1799  		expectedWorkers = append(expectedWorkers, w)
  1800  		if w == "environ-provisioner" {
  1801  			expectedWorkers = append(expectedWorkers, "environ-storageprovisioner")
  1802  		}
  1803  	}
  1804  	s.PatchValue(&watcher.Period, 100*time.Millisecond)
  1805  
  1806  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1807  	a := s.newAgent(c, m)
  1808  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1809  
  1810  	_ = s.singularRecord.nextRunner(c) // Don't care about this one for this test.
  1811  
  1812  	// Wait for the workers for the initial env to start. The
  1813  	// firewaller is the last worker started for a new environment.
  1814  	r0 := s.singularRecord.nextRunner(c)
  1815  	workers := r0.waitForWorker(c, "firewaller")
  1816  	c.Assert(workers, jc.SameContents, expectedWorkers)
  1817  
  1818  	return expectedWorkers, func() {
  1819  		c.Check(a.Stop(), jc.ErrorIsNil)
  1820  	}
  1821  }
  1822  
  1823  func (s *MachineSuite) TestReplicasetInitiation(c *gc.C) {
  1824  	if runtime.GOOS == "windows" {
  1825  		c.Skip("state servers on windows aren't supported")
  1826  	}
  1827  
  1828  	s.fakeEnsureMongo.ReplicasetInitiated = false
  1829  
  1830  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1831  	a := s.newAgent(c, m)
  1832  	agentConfig := a.CurrentConfig()
  1833  
  1834  	err := a.ensureMongoServer(agentConfig)
  1835  	c.Assert(err, jc.ErrorIsNil)
  1836  
  1837  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  1838  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 1)
  1839  }
  1840  
  1841  func (s *MachineSuite) TestReplicasetAlreadyInitiated(c *gc.C) {
  1842  	if runtime.GOOS == "windows" {
  1843  		c.Skip("state servers on windows aren't supported")
  1844  	}
  1845  
  1846  	s.fakeEnsureMongo.ReplicasetInitiated = true
  1847  
  1848  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1849  	a := s.newAgent(c, m)
  1850  	agentConfig := a.CurrentConfig()
  1851  
  1852  	err := a.ensureMongoServer(agentConfig)
  1853  	c.Assert(err, jc.ErrorIsNil)
  1854  
  1855  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  1856  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 0)
  1857  }
  1858  
  1859  func (s *MachineSuite) TestReplicasetInitForNewStateServer(c *gc.C) {
  1860  	if runtime.GOOS == "windows" {
  1861  		c.Skip("state servers on windows aren't supported")
  1862  	}
  1863  
  1864  	s.fakeEnsureMongo.ServiceInstalled = false
  1865  	s.fakeEnsureMongo.ReplicasetInitiated = true
  1866  
  1867  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1868  	a := s.newAgent(c, m)
  1869  	agentConfig := a.CurrentConfig()
  1870  
  1871  	err := a.ensureMongoServer(agentConfig)
  1872  	c.Assert(err, jc.ErrorIsNil)
  1873  
  1874  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  1875  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 0)
  1876  }
  1877  
  1878  // MachineWithCharmsSuite provides infrastructure for tests which need to
  1879  // work with charms.
  1880  type MachineWithCharmsSuite struct {
  1881  	commonMachineSuite
  1882  	charmtesting.CharmSuite
  1883  
  1884  	machine *state.Machine
  1885  }
  1886  
  1887  func (s *MachineWithCharmsSuite) SetUpSuite(c *gc.C) {
  1888  	s.commonMachineSuite.SetUpSuite(c)
  1889  	s.CharmSuite.SetUpSuite(c, &s.commonMachineSuite.JujuConnSuite)
  1890  }
  1891  
  1892  func (s *MachineWithCharmsSuite) TearDownSuite(c *gc.C) {
  1893  	s.commonMachineSuite.TearDownSuite(c)
  1894  	s.CharmSuite.TearDownSuite(c)
  1895  }
  1896  
  1897  func (s *MachineWithCharmsSuite) SetUpTest(c *gc.C) {
  1898  	s.commonMachineSuite.SetUpTest(c)
  1899  	s.CharmSuite.SetUpTest(c)
  1900  }
  1901  
  1902  func (s *MachineWithCharmsSuite) TearDownTest(c *gc.C) {
  1903  	s.commonMachineSuite.TearDownTest(c)
  1904  	s.CharmSuite.TearDownTest(c)
  1905  }
  1906  
  1907  func (s *MachineWithCharmsSuite) TestManageEnvironRunsCharmRevisionUpdater(c *gc.C) {
  1908  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1909  
  1910  	s.SetupScenario(c)
  1911  
  1912  	a := s.newAgent(c, m)
  1913  	go func() {
  1914  		c.Check(a.Run(nil), jc.ErrorIsNil)
  1915  	}()
  1916  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1917  
  1918  	checkRevision := func() bool {
  1919  		curl := charm.MustParseURL("cs:quantal/mysql")
  1920  		placeholder, err := s.State.LatestPlaceholderCharm(curl)
  1921  		return err == nil && placeholder.String() == curl.WithRevision(23).String()
  1922  	}
  1923  	success := false
  1924  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
  1925  		if success = checkRevision(); success {
  1926  			break
  1927  		}
  1928  	}
  1929  	c.Assert(success, jc.IsTrue)
  1930  }
  1931  
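        // mongoSuite checks the dial options used by the state worker against
        // a real mongod instance.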
  1932  type mongoSuite struct {
  1933  	coretesting.BaseSuite
  1934  }
  1935  
  1936  func (s *mongoSuite) TestStateWorkerDialSetsWriteMajority(c *gc.C) {
  1937  	s.testStateWorkerDialSetsWriteMajority(c, true)
  1938  }
  1939  
  1940  func (s *mongoSuite) TestStateWorkerDialDoesNotSetWriteMajorityWithoutReplsetConfig(c *gc.C) {
  1941  	s.testStateWorkerDialSetsWriteMajority(c, false)
  1942  }
  1943  
  1944  func (s *mongoSuite) testStateWorkerDialSetsWriteMajority(c *gc.C, configureReplset bool) {
  1945  	inst := gitjujutesting.MgoInstance{
  1946  		EnableJournal: true,
  1947  		Params:        []string{"--replSet", "juju"},
  1948  	}
  1949  	err := inst.Start(coretesting.Certs)
  1950  	c.Assert(err, jc.ErrorIsNil)
  1951  	defer inst.Destroy()
  1952  
  1953  	var expectedWMode string
  1954  	dialOpts := stateWorkerDialOpts
  1955  	if configureReplset {
  1956  		info := inst.DialInfo()
  1957  		args := peergrouper.InitiateMongoParams{
  1958  			DialInfo:       info,
  1959  			MemberHostPort: inst.Addr(),
  1960  		}
  1961  		err = peergrouper.MaybeInitiateMongoServer(args)
  1962  		c.Assert(err, jc.ErrorIsNil)
  1963  		expectedWMode = "majority"
  1964  	} else {
  1965  		dialOpts.Direct = true
  1966  	}
  1967  
  1968  	mongoInfo := mongo.Info{
  1969  		Addrs:  []string{inst.Addr()},
  1970  		CACert: coretesting.CACert,
  1971  	}
  1972  	session, err := mongo.DialWithInfo(mongoInfo, dialOpts)
  1973  	c.Assert(err, jc.ErrorIsNil)
  1974  	defer session.Close()
  1975  
  1976  	safe := session.Safe()
  1977  	c.Assert(safe, gc.NotNil)
  1978  	c.Assert(safe.WMode, gc.Equals, expectedWMode)
  1979  	c.Assert(safe.J, jc.IsTrue) // always enabled
  1980  }
  1981  
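        // shouldWriteProxyFilesSuite exercises shouldWriteProxyFiles across
        // provider types and machine ids.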
  1982  type shouldWriteProxyFilesSuite struct {
  1983  	coretesting.BaseSuite
  1984  }
  1985  
  1986  var _ = gc.Suite(&shouldWriteProxyFilesSuite{})
  1987  
  1988  func (s *shouldWriteProxyFilesSuite) TestAll(c *gc.C) {
  1989  	tests := []struct {
  1990  		description  string
  1991  		providerType string
  1992  		machineId    string
  1993  		expect       bool
  1994  	}{{
  1995  		description:  "local provider machine 0 must not write",
  1996  		providerType: "local",
  1997  		machineId:    "0",
  1998  		expect:       false,
  1999  	}, {
  2000  		description:  "local provider other machine must write 1",
  2001  		providerType: "local",
  2002  		machineId:    "0/kvm/0",
  2003  		expect:       true,
  2004  	}, {
  2005  		description:  "local provider other machine must write 2",
  2006  		providerType: "local",
  2007  		machineId:    "123",
  2008  		expect:       true,
  2009  	}, {
  2010  		description:  "other provider machine 0 must write",
  2011  		providerType: "anything",
  2012  		machineId:    "0",
  2013  		expect:       true,
  2014  	}, {
  2015  		description:  "other provider other machine must write 1",
  2016  		providerType: "dummy",
  2017  		machineId:    "0/kvm/0",
  2018  		expect:       true,
  2019  	}, {
  2020  		description:  "other provider other machine must write 2",
  2021  		providerType: "blahblahblah",
  2022  		machineId:    "123",
  2023  		expect:       true,
  2024  	}}
  2025  	for i, test := range tests {
  2026  		c.Logf("test %d: %s", i, test.description)
  2027  		mockConf := &mockAgentConfig{
  2028  			providerType: test.providerType,
  2029  			tag:          names.NewMachineTag(test.machineId),
  2030  		}
  2031  		c.Check(shouldWriteProxyFiles(mockConf), gc.Equals, test.expect)
  2032  	}
  2033  }
  2034  
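        // mockAgentConfig is a stub agent.Config reporting a fixed tag and
        // provider type.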
  2035  type mockAgentConfig struct {
  2036  	agent.Config
  2037  	providerType string
  2038  	tag          names.Tag
  2039  }
  2040  
  2041  func (m *mockAgentConfig) Tag() names.Tag {
  2042  	return m.tag
  2043  }
  2044  
  2045  func (m *mockAgentConfig) Value(key string) string {
  2046  	if key == agent.ProviderType {
  2047  		return m.providerType
  2048  	}
  2049  	return ""
  2050  }
  2051  
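        // singularRunnerRecord remembers the singular runners created by the
        // agent so tests can observe which workers they start.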
  2052  type singularRunnerRecord struct {
  2053  	runnerC chan *fakeSingularRunner
  2054  }
  2055  
  2056  func newSingularRunnerRecord() *singularRunnerRecord {
  2057  	return &singularRunnerRecord{
  2058  		runnerC: make(chan *fakeSingularRunner, 5),
  2059  	}
  2060  }
  2061  
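        // newSingularRunner wraps singular.New, recording each runner it
        // creates on runnerC.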
  2062  func (r *singularRunnerRecord) newSingularRunner(runner worker.Runner, conn singular.Conn) (worker.Runner, error) {
  2063  	sr, err := singular.New(runner, conn)
  2064  	if err != nil {
  2065  		return nil, err
  2066  	}
  2067  	fakeRunner := &fakeSingularRunner{
  2068  		Runner: sr,
  2069  		startC: make(chan string, 64),
  2070  	}
  2071  	r.runnerC <- fakeRunner
  2072  	return fakeRunner, nil
  2073  }
  2074  
  2075  // nextRunner blocks until a new singular runner is created.
  2076  func (r *singularRunnerRecord) nextRunner(c *gc.C) *fakeSingularRunner {
  2077  	for {
  2078  		select {
  2079  		case r := <-r.runnerC:
  2080  			return r
  2081  		case <-time.After(coretesting.LongWait):
  2082  			c.Fatal("timed out waiting for singular runner to be created")
  2083  		}
  2084  	}
  2085  }
  2086  
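        // fakeSingularRunner reports the name of every worker it starts on
        // startC.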
  2087  type fakeSingularRunner struct {
  2088  	worker.Runner
  2089  	startC chan string
  2090  }
  2091  
  2092  func (r *fakeSingularRunner) StartWorker(name string, start func() (worker.Worker, error)) error {
  2093  	r.startC <- name
  2094  	return r.Runner.StartWorker(name, start)
  2095  }
  2096  
  2097  // waitForWorker waits for a given worker to be started, returning all
  2098  // workers started while waiting.
  2099  func (r *fakeSingularRunner) waitForWorker(c *gc.C, target string) []string {
  2100  	var seen []string
  2101  	timeout := time.After(coretesting.LongWait)
  2102  	for {
  2103  		select {
  2104  		case workerName := <-r.startC:
  2105  			seen = append(seen, workerName)
  2106  			if workerName == target {
  2107  				return seen
  2108  			}
  2109  		case <-timeout:
  2110  			c.Fatal("timed out waiting for " + target)
  2111  		}
  2112  	}
  2113  }
  2114  
  2115  // waitForWorkers waits for all of the given workers to be started,
  2116  // returning every worker started while waiting.
  2117  func (r *fakeSingularRunner) waitForWorkers(c *gc.C, targets []string) []string {
  2118  	var seen []string
  2119  	seenTargets := make(map[string]bool)
  2120  	numSeenTargets := 0
  2121  	timeout := time.After(coretesting.LongWait)
  2122  	for {
  2123  		select {
  2124  		case workerName := <-r.startC:
  2125  			if seenTargets[workerName] {
  2126  				c.Fatal("worker started twice: " + workerName)
  2127  			}
  2128  			seenTargets[workerName] = true
  2129  			numSeenTargets++
  2130  			seen = append(seen, workerName)
  2131  			if numSeenTargets == len(targets) {
  2132  				return seen
  2133  			}
  2134  		case <-timeout:
  2135  			c.Fatalf("timed out waiting for %v", targets)
  2136  		}
  2137  	}
  2138  }
  2139  
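        // mockMetricAPI stubs the metrics manager API, signalling on channels
        // whenever SendMetrics or CleanupOldMetrics is called.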
  2140  type mockMetricAPI struct {
  2141  	stop          chan struct{}
  2142  	cleanUpCalled chan struct{}
  2143  	sendCalled    chan struct{}
  2144  }
  2145  
  2146  func newMockMetricAPI() *mockMetricAPI {
  2147  	return &mockMetricAPI{
  2148  		stop:          make(chan struct{}),
  2149  		cleanUpCalled: make(chan struct{}),
  2150  		sendCalled:    make(chan struct{}),
  2151  	}
  2152  }
  2153  
  2154  func (m *mockMetricAPI) CleanupOldMetrics() error {
  2155  	go func() {
  2156  		select {
  2157  		case m.cleanUpCalled <- struct{}{}:
  2158  		case <-m.stop:
  2160  		}
  2161  	}()
  2162  	return nil
  2163  }
  2164  
  2165  func (m *mockMetricAPI) SendMetrics() error {
  2166  	go func() {
  2167  		select {
  2168  		case m.sendCalled <- struct{}{}:
  2169  		case <-m.stop:
  2171  		}
  2172  	}()
  2173  	return nil
  2174  }
  2175  
  2176  func (m *mockMetricAPI) SendCalled() <-chan struct{} {
  2177  	return m.sendCalled
  2178  }
  2179  
  2180  func (m *mockMetricAPI) CleanupCalled() <-chan struct{} {
  2181  	return m.cleanUpCalled
  2182  }
  2183  
  2184  func (m *mockMetricAPI) Stop() {
  2185  	close(m.stop)
  2186  }
  2187  
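        // mkdtemp creates a new temporary directory and returns its path,
        // panicking on error.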
  2188  func mkdtemp(prefix string) string {
  2189  	d, err := ioutil.TempDir("", prefix)
  2190  	if err != nil {
  2191  		panic(err)
  2192  	}
  2193  	return d
  2194  }
  2195  
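        // mktemp creates a temporary file with the given content and returns
        // its name, panicking on error.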
  2196  func mktemp(prefix string, content string) string {
  2197  	f, err := ioutil.TempFile("", prefix)
  2198  	if err != nil {
  2199  		panic(err)
  2200  	}
  2201  	_, err = f.WriteString(content)
  2202  	if err != nil {
  2203  		panic(err)
  2204  	}
  2205  	f.Close()
  2206  	return f.Name()
  2207  }