github.com/mhilton/juju-juju@v0.0.0-20150901100907-a94dd2c73455/cmd/jujud/agent/machine_test.go (about)

     1  // Copyright 2012, 2013 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package agent
     5  
     6  import (
     7  	"io"
     8  	"io/ioutil"
     9  	"os"
    10  	"path/filepath"
    11  	"reflect"
    12  	"runtime"
    13  	"strings"
    14  	"sync/atomic"
    15  	"testing"
    16  	"time"
    17  
    18  	"github.com/juju/cmd"
    19  	"github.com/juju/errors"
    20  	"github.com/juju/names"
    21  	gitjujutesting "github.com/juju/testing"
    22  	jc "github.com/juju/testing/checkers"
    23  	"github.com/juju/utils/clock"
    24  	"github.com/juju/utils/proxy"
    25  	"github.com/juju/utils/set"
    26  	"github.com/juju/utils/symlink"
    27  	gc "gopkg.in/check.v1"
    28  	"gopkg.in/juju/charm.v5"
    29  	"gopkg.in/juju/charm.v5/charmrepo"
    30  	"gopkg.in/natefinch/lumberjack.v2"
    31  
    32  	"github.com/juju/juju/agent"
    33  	"github.com/juju/juju/api"
    34  	apiaddresser "github.com/juju/juju/api/addresser"
    35  	apideployer "github.com/juju/juju/api/deployer"
    36  	apienvironment "github.com/juju/juju/api/environment"
    37  	apifirewaller "github.com/juju/juju/api/firewaller"
    38  	apiinstancepoller "github.com/juju/juju/api/instancepoller"
    39  	apimetricsmanager "github.com/juju/juju/api/metricsmanager"
    40  	apinetworker "github.com/juju/juju/api/networker"
    41  	apirsyslog "github.com/juju/juju/api/rsyslog"
    42  	charmtesting "github.com/juju/juju/apiserver/charmrevisionupdater/testing"
    43  	"github.com/juju/juju/apiserver/params"
    44  	"github.com/juju/juju/cert"
    45  	agenttesting "github.com/juju/juju/cmd/jujud/agent/testing"
    46  	cmdutil "github.com/juju/juju/cmd/jujud/util"
    47  	"github.com/juju/juju/cmd/jujud/util/password"
    48  	lxctesting "github.com/juju/juju/container/lxc/testing"
    49  	"github.com/juju/juju/environs/config"
    50  	envtesting "github.com/juju/juju/environs/testing"
    51  	"github.com/juju/juju/feature"
    52  	"github.com/juju/juju/instance"
    53  	"github.com/juju/juju/juju"
    54  	jujutesting "github.com/juju/juju/juju/testing"
    55  	"github.com/juju/juju/mongo"
    56  	"github.com/juju/juju/network"
    57  	"github.com/juju/juju/provider/dummy"
    58  	"github.com/juju/juju/service/upstart"
    59  	"github.com/juju/juju/state"
    60  	"github.com/juju/juju/state/watcher"
    61  	"github.com/juju/juju/storage"
    62  	coretesting "github.com/juju/juju/testing"
    63  	"github.com/juju/juju/testing/factory"
    64  	"github.com/juju/juju/tools"
    65  	"github.com/juju/juju/utils/ssh"
    66  	sshtesting "github.com/juju/juju/utils/ssh/testing"
    67  	"github.com/juju/juju/version"
    68  	"github.com/juju/juju/worker"
    69  	"github.com/juju/juju/worker/addresser"
    70  	"github.com/juju/juju/worker/apicaller"
    71  	"github.com/juju/juju/worker/authenticationworker"
    72  	"github.com/juju/juju/worker/certupdater"
    73  	"github.com/juju/juju/worker/deployer"
    74  	"github.com/juju/juju/worker/diskmanager"
    75  	"github.com/juju/juju/worker/instancepoller"
    76  	"github.com/juju/juju/worker/logsender"
    77  	"github.com/juju/juju/worker/machiner"
    78  	"github.com/juju/juju/worker/networker"
    79  	"github.com/juju/juju/worker/peergrouper"
    80  	"github.com/juju/juju/worker/proxyupdater"
    81  	"github.com/juju/juju/worker/resumer"
    82  	"github.com/juju/juju/worker/rsyslog"
    83  	"github.com/juju/juju/worker/singular"
    84  	"github.com/juju/juju/worker/storageprovisioner"
    85  	"github.com/juju/juju/worker/upgrader"
    86  )
    87  
    88  var (
    89  	_ = gc.Suite(&MachineSuite{})
    90  	_ = gc.Suite(&MachineWithCharmsSuite{})
    91  	_ = gc.Suite(&mongoSuite{})
    92  )
    93  
    94  func TestPackage(t *testing.T) {
    95  	// TODO(waigani) 2014-03-19 bug 1294458
    96  	// Refactor to use base suites
    97  
    98  	// Change the path to "juju-run", so that the
    99  	// tests don't try to write to /usr/local/bin.
   100  	JujuRun = mktemp("juju-run", "")
   101  	defer os.Remove(JujuRun)
   102  
   103  	coretesting.MgoTestPackage(t)
   104  }
   105  
   106  type commonMachineSuite struct {
   107  	singularRecord *singularRunnerRecord
   108  	lxctesting.TestSuite
   109  	fakeEnsureMongo *agenttesting.FakeEnsureMongo
   110  	AgentSuite
   111  }
   112  
   113  func (s *commonMachineSuite) SetUpSuite(c *gc.C) {
   114  	s.AgentSuite.SetUpSuite(c)
   115  	s.TestSuite.SetUpSuite(c)
   116  	// We're not interested in whether EnsureJujudPassword works here since we test it somewhere else
   117  	s.PatchValue(&password.EnsureJujudPassword, func() error { return nil })
   118  }
   119  
   120  func (s *commonMachineSuite) TearDownSuite(c *gc.C) {
   121  	s.TestSuite.TearDownSuite(c)
   122  	s.AgentSuite.TearDownSuite(c)
   123  }
   124  
   125  func (s *commonMachineSuite) SetUpTest(c *gc.C) {
   126  	s.AgentSuite.PatchValue(&version.Current.Number, coretesting.FakeVersionNumber)
   127  	s.AgentSuite.SetUpTest(c)
   128  	s.TestSuite.SetUpTest(c)
   129  	s.AgentSuite.PatchValue(&charmrepo.CacheDir, c.MkDir())
   130  	s.AgentSuite.PatchValue(&stateWorkerDialOpts, mongo.DefaultDialOpts())
   131  
   132  	os.Remove(JujuRun) // ignore error; may not exist
   133  	// Patch ssh user to avoid touching ~ubuntu/.ssh/authorized_keys.
   134  	s.AgentSuite.PatchValue(&authenticationworker.SSHUser, "")
   135  
   136  	testpath := c.MkDir()
   137  	s.AgentSuite.PatchEnvPathPrepend(testpath)
   138  	// mock out the start method so we can fake install services without sudo
   139  	fakeCmd(filepath.Join(testpath, "start"))
   140  	fakeCmd(filepath.Join(testpath, "stop"))
   141  
   142  	s.AgentSuite.PatchValue(&upstart.InitDir, c.MkDir())
   143  
   144  	s.singularRecord = newSingularRunnerRecord()
   145  	s.AgentSuite.PatchValue(&newSingularRunner, s.singularRecord.newSingularRunner)
   146  	s.AgentSuite.PatchValue(&peergrouperNew, func(st *state.State) (worker.Worker, error) {
   147  		return newDummyWorker(), nil
   148  	})
   149  
   150  	s.fakeEnsureMongo = agenttesting.InstallFakeEnsureMongo(s)
   151  	s.AgentSuite.PatchValue(&maybeInitiateMongoServer, s.fakeEnsureMongo.InitiateMongo)
   152  }
   153  
   154  func fakeCmd(path string) {
   155  	err := ioutil.WriteFile(path, []byte("#!/bin/bash --norc\nexit 0"), 0755)
   156  	if err != nil {
   157  		panic(err)
   158  	}
   159  }
   160  
   161  func (s *commonMachineSuite) TearDownTest(c *gc.C) {
   162  	s.TestSuite.TearDownTest(c)
   163  	s.AgentSuite.TearDownTest(c)
   164  }
   165  
   166  // primeAgent adds a new Machine to run the given jobs, and sets up the
   167  // machine agent's directory.  It returns the new machine, the
   168  // agent's configuration and the tools currently running.
   169  func (s *commonMachineSuite) primeAgent(
   170  	c *gc.C, vers version.Binary,
   171  	jobs ...state.MachineJob) (m *state.Machine, agentConfig agent.ConfigSetterWriter, tools *tools.Tools) {
   172  
   173  	m, err := s.State.AddMachine("quantal", jobs...)
   174  	c.Assert(err, jc.ErrorIsNil)
   175  
   176  	pinger, err := m.SetAgentPresence()
   177  	c.Assert(err, jc.ErrorIsNil)
   178  	s.AddCleanup(func(c *gc.C) {
   179  		err := pinger.Stop()
   180  		c.Check(err, jc.ErrorIsNil)
   181  	})
   182  
   183  	return s.configureMachine(c, m.Id(), vers)
   184  }
   185  
   186  func (s *commonMachineSuite) configureMachine(c *gc.C, machineId string, vers version.Binary) (
   187  	machine *state.Machine, agentConfig agent.ConfigSetterWriter, tools *tools.Tools,
   188  ) {
   189  	m, err := s.State.Machine(machineId)
   190  	c.Assert(err, jc.ErrorIsNil)
   191  
   192  	// Add a machine and ensure it is provisioned.
   193  	inst, md := jujutesting.AssertStartInstance(c, s.Environ, machineId)
   194  	c.Assert(m.SetProvisioned(inst.Id(), agent.BootstrapNonce, md), jc.ErrorIsNil)
   195  
   196  	// Add an address for the tests in case the maybeInitiateMongoServer
   197  	// codepath is exercised.
   198  	s.setFakeMachineAddresses(c, m)
   199  
   200  	// Set up the new machine.
   201  	err = m.SetAgentVersion(vers)
   202  	c.Assert(err, jc.ErrorIsNil)
   203  	err = m.SetPassword(initialMachinePassword)
   204  	c.Assert(err, jc.ErrorIsNil)
   205  	tag := m.Tag()
   206  	if m.IsManager() {
   207  		err = m.SetMongoPassword(initialMachinePassword)
   208  		c.Assert(err, jc.ErrorIsNil)
   209  		agentConfig, tools = s.AgentSuite.PrimeStateAgent(c, tag, initialMachinePassword, vers)
   210  		info, ok := agentConfig.StateServingInfo()
   211  		c.Assert(ok, jc.IsTrue)
   212  		ssi := cmdutil.ParamsStateServingInfoToStateStateServingInfo(info)
   213  		err = s.State.SetStateServingInfo(ssi)
   214  		c.Assert(err, jc.ErrorIsNil)
   215  	} else {
   216  		agentConfig, tools = s.PrimeAgent(c, tag, initialMachinePassword, vers)
   217  	}
   218  	err = agentConfig.Write()
   219  	c.Assert(err, jc.ErrorIsNil)
   220  	return m, agentConfig, tools
   221  }
   222  
   223  // newAgent returns a new MachineAgent instance
   224  func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
   225  	agentConf := agentConf{dataDir: s.DataDir()}
   226  	agentConf.ReadConfig(names.NewMachineTag(m.Id()).String())
   227  	logsCh, err := logsender.InstallBufferedLogWriter(1024)
   228  	c.Assert(err, jc.ErrorIsNil)
   229  	machineAgentFactory := MachineAgentFactoryFn(
   230  		&agentConf, logsCh, &mockLoopDeviceManager{},
   231  	)
   232  	return machineAgentFactory(m.Id())
   233  }
   234  
   235  func (s *MachineSuite) TestParseSuccess(c *gc.C) {
   236  	create := func() (cmd.Command, AgentConf) {
   237  		agentConf := agentConf{dataDir: s.DataDir()}
   238  		a := NewMachineAgentCmd(
   239  			nil,
   240  			MachineAgentFactoryFn(
   241  				&agentConf, nil, &mockLoopDeviceManager{},
   242  			),
   243  			&agentConf,
   244  			&agentConf,
   245  		)
   246  		a.(*machineAgentCmd).logToStdErr = true
   247  
   248  		return a, &agentConf
   249  	}
   250  	a := CheckAgentCommand(c, create, []string{"--machine-id", "42"})
   251  	c.Assert(a.(*machineAgentCmd).machineId, gc.Equals, "42")
   252  }
   253  
   254  type MachineSuite struct {
   255  	commonMachineSuite
   256  	metricAPI *mockMetricAPI
   257  }
   258  
   259  var perEnvSingularWorkers = []string{
   260  	"cleaner",
   261  	"minunitsworker",
   262  	"addresserworker",
   263  	"environ-provisioner",
   264  	"charm-revision-updater",
   265  	"instancepoller",
   266  	"firewaller",
   267  }
   268  
   269  const initialMachinePassword = "machine-password-1234567890"
   270  
   271  func (s *MachineSuite) SetUpTest(c *gc.C) {
   272  	s.commonMachineSuite.SetUpTest(c)
   273  	s.metricAPI = newMockMetricAPI()
   274  	s.PatchValue(&getMetricAPI, func(_ api.Connection) apimetricsmanager.MetricsManagerClient {
   275  		return s.metricAPI
   276  	})
   277  	s.AddCleanup(func(*gc.C) { s.metricAPI.Stop() })
   278  	// Most of these tests normally finish sub-second on a fast machine.
   279  	// If any given test hits a minute, we have almost certainly become
   280  	// wedged, so dump the logs.
   281  	coretesting.DumpTestLogsAfter(time.Minute, c, s)
   282  }
   283  
   284  func (s *MachineSuite) TestParseNonsense(c *gc.C) {
   285  	for _, args := range [][]string{
   286  		{},
   287  		{"--machine-id", "-4004"},
   288  	} {
   289  		var agentConf agentConf
   290  		err := ParseAgentCommand(&machineAgentCmd{agentInitializer: &agentConf}, args)
   291  		c.Assert(err, gc.ErrorMatches, "--machine-id option must be set, and expects a non-negative integer")
   292  	}
   293  }
   294  
   295  func (s *MachineSuite) TestParseUnknown(c *gc.C) {
   296  	var agentConf agentConf
   297  	a := &machineAgentCmd{agentInitializer: &agentConf}
   298  	err := ParseAgentCommand(a, []string{"--machine-id", "42", "blistering barnacles"})
   299  	c.Assert(err, gc.ErrorMatches, `unrecognized args: \["blistering barnacles"\]`)
   300  }
   301  
   302  func (s *MachineSuite) TestRunInvalidMachineId(c *gc.C) {
   303  	c.Skip("agents don't yet distinguish between temporary and permanent errors")
   304  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
   305  	err := s.newAgent(c, m).Run(nil)
   306  	c.Assert(err, gc.ErrorMatches, "some error")
   307  }
   308  
   309  func (s *MachineSuite) TestUseLumberjack(c *gc.C) {
   310  	ctx, err := cmd.DefaultContext()
   311  	c.Assert(err, gc.IsNil)
   312  
   313  	agentConf := FakeAgentConfig{}
   314  
   315  	a := NewMachineAgentCmd(
   316  		ctx,
   317  		MachineAgentFactoryFn(
   318  			agentConf, nil, &mockLoopDeviceManager{},
   319  		),
   320  		agentConf,
   321  		agentConf,
   322  	)
   323  	// little hack to set the data that Init expects to already be set
   324  	a.(*machineAgentCmd).machineId = "42"
   325  
   326  	err = a.Init(nil)
   327  	c.Assert(err, gc.IsNil)
   328  
   329  	l, ok := ctx.Stderr.(*lumberjack.Logger)
   330  	c.Assert(ok, jc.IsTrue)
   331  	c.Check(l.MaxAge, gc.Equals, 0)
   332  	c.Check(l.MaxBackups, gc.Equals, 2)
   333  	c.Check(l.Filename, gc.Equals, filepath.FromSlash("/var/log/juju/machine-42.log"))
   334  	c.Check(l.MaxSize, gc.Equals, 300)
   335  }
   336  
   337  func (s *MachineSuite) TestDontUseLumberjack(c *gc.C) {
   338  	ctx, err := cmd.DefaultContext()
   339  	c.Assert(err, gc.IsNil)
   340  
   341  	agentConf := FakeAgentConfig{}
   342  
   343  	a := NewMachineAgentCmd(
   344  		ctx,
   345  		MachineAgentFactoryFn(
   346  			agentConf, nil,
   347  			&mockLoopDeviceManager{},
   348  		),
   349  		agentConf,
   350  		agentConf,
   351  	)
   352  	// little hack to set the data that Init expects to already be set
   353  	a.(*machineAgentCmd).machineId = "42"
   354  
   355  	// set the value that normally gets set by the flag parsing
   356  	a.(*machineAgentCmd).logToStdErr = true
   357  
   358  	err = a.Init(nil)
   359  	c.Assert(err, gc.IsNil)
   360  
   361  	_, ok := ctx.Stderr.(*lumberjack.Logger)
   362  	c.Assert(ok, jc.IsFalse)
   363  }
   364  
   365  func (s *MachineSuite) TestRunStop(c *gc.C) {
   366  	m, ac, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
   367  	a := s.newAgent(c, m)
   368  	done := make(chan error)
   369  	go func() {
   370  		done <- a.Run(nil)
   371  	}()
   372  	err := a.Stop()
   373  	c.Assert(err, jc.ErrorIsNil)
   374  	c.Assert(<-done, jc.ErrorIsNil)
   375  	c.Assert(charmrepo.CacheDir, gc.Equals, filepath.Join(ac.DataDir(), "charmcache"))
   376  }
   377  
   378  func (s *MachineSuite) TestWithDeadMachine(c *gc.C) {
   379  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
   380  	err := m.EnsureDead()
   381  	c.Assert(err, jc.ErrorIsNil)
   382  	a := s.newAgent(c, m)
   383  	err = runWithTimeout(a)
   384  	c.Assert(err, jc.ErrorIsNil)
   385  }
   386  
   387  func (s *MachineSuite) TestWithRemovedMachine(c *gc.C) {
   388  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
   389  	err := m.EnsureDead()
   390  	c.Assert(err, jc.ErrorIsNil)
   391  	err = m.Remove()
   392  	c.Assert(err, jc.ErrorIsNil)
   393  	a := s.newAgent(c, m)
   394  	err = runWithTimeout(a)
   395  	c.Assert(err, jc.ErrorIsNil)
   396  }
   397  
   398  func (s *MachineSuite) TestDyingMachine(c *gc.C) {
   399  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
   400  	a := s.newAgent(c, m)
   401  	done := make(chan error)
   402  	go func() {
   403  		done <- a.Run(nil)
   404  	}()
   405  	defer func() {
   406  		c.Check(a.Stop(), jc.ErrorIsNil)
   407  	}()
   408  	// Wait for configuration to be finished
   409  	<-a.WorkersStarted()
   410  	err := m.Destroy()
   411  	c.Assert(err, jc.ErrorIsNil)
   412  	select {
   413  	case err := <-done:
   414  		c.Assert(err, jc.ErrorIsNil)
   415  	case <-time.After(watcher.Period * 5 / 4):
   416  		// TODO(rog) Fix this so it doesn't wait for so long.
   417  		// https://bugs.launchpad.net/juju-core/+bug/1163983
   418  		c.Fatalf("timed out waiting for agent to terminate")
   419  	}
   420  	err = m.Refresh()
   421  	c.Assert(err, jc.ErrorIsNil)
   422  	c.Assert(m.Life(), gc.Equals, state.Dead)
   423  }
   424  
   425  func (s *MachineSuite) TestHostUnits(c *gc.C) {
   426  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
   427  	a := s.newAgent(c, m)
   428  	ctx, reset := patchDeployContext(c, s.BackingState)
   429  	defer reset()
   430  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
   431  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
   432  
   433  	// check that unassigned units don't trigger any deployments.
   434  	svc := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
   435  	u0, err := svc.AddUnit()
   436  	c.Assert(err, jc.ErrorIsNil)
   437  	u1, err := svc.AddUnit()
   438  	c.Assert(err, jc.ErrorIsNil)
   439  
   440  	ctx.waitDeployed(c)
   441  
   442  	// assign u0, check it's deployed.
   443  	err = u0.AssignToMachine(m)
   444  	c.Assert(err, jc.ErrorIsNil)
   445  	ctx.waitDeployed(c, u0.Name())
   446  
   447  	// "start the agent" for u0 to prevent short-circuited remove-on-destroy;
   448  	// check that it's kept deployed despite being Dying.
   449  	err = u0.SetAgentStatus(state.StatusIdle, "", nil)
   450  	c.Assert(err, jc.ErrorIsNil)
   451  	err = u0.Destroy()
   452  	c.Assert(err, jc.ErrorIsNil)
   453  	ctx.waitDeployed(c, u0.Name())
   454  
   455  	// add u1 to the machine, check it's deployed.
   456  	err = u1.AssignToMachine(m)
   457  	c.Assert(err, jc.ErrorIsNil)
   458  	ctx.waitDeployed(c, u0.Name(), u1.Name())
   459  
   460  	// make u0 dead; check the deployer recalls the unit and removes it from
   461  	// state.
   462  	err = u0.EnsureDead()
   463  	c.Assert(err, jc.ErrorIsNil)
   464  	ctx.waitDeployed(c, u1.Name())
   465  
   466  	// The deployer actually removes the unit just after
   467  	// removing its deployment, so we need to poll here
   468  	// until it actually happens.
   469  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
   470  		err := u0.Refresh()
   471  		if err == nil && attempt.HasNext() {
   472  			continue
   473  		}
   474  		c.Assert(err, jc.Satisfies, errors.IsNotFound)
   475  	}
   476  
   477  	// short-circuit-remove u1 after it's been deployed; check it's recalled
   478  	// and removed from state.
   479  	err = u1.Destroy()
   480  	c.Assert(err, jc.ErrorIsNil)
   481  	err = u1.Refresh()
   482  	c.Assert(err, jc.Satisfies, errors.IsNotFound)
   483  	ctx.waitDeployed(c)
   484  }
   485  
   486  func patchDeployContext(c *gc.C, st *state.State) (*fakeContext, func()) {
   487  	ctx := &fakeContext{
   488  		inited:   make(chan struct{}),
   489  		deployed: make(set.Strings),
   490  	}
   491  	orig := newDeployContext
   492  	newDeployContext = func(dst *apideployer.State, agentConfig agent.Config) deployer.Context {
   493  		ctx.st = st
   494  		ctx.agentConfig = agentConfig
   495  		close(ctx.inited)
   496  		return ctx
   497  	}
   498  	return ctx, func() { newDeployContext = orig }
   499  }
   500  
   501  func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
   502  	addrs := network.NewAddresses("0.1.2.3")
   503  	err := machine.SetProviderAddresses(addrs...)
   504  	c.Assert(err, jc.ErrorIsNil)
   505  	// Set the addresses in the environ instance as well so that if the instance poller
   506  	// runs it won't overwrite them.
   507  	instId, err := machine.InstanceId()
   508  	c.Assert(err, jc.ErrorIsNil)
   509  	insts, err := s.Environ.Instances([]instance.Id{instId})
   510  	c.Assert(err, jc.ErrorIsNil)
   511  	dummy.SetInstanceAddresses(insts[0], addrs)
   512  }
   513  
   514  func (s *MachineSuite) TestManageEnviron(c *gc.C) {
   515  	usefulVersion := version.Current
   516  	usefulVersion.Series = "quantal" // to match the charm created below
   517  	envtesting.AssertUploadFakeToolsVersions(
   518  		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
   519  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   520  	op := make(chan dummy.Operation, 200)
   521  	dummy.Listen(op)
   522  
   523  	a := s.newAgent(c, m)
   524  	// Make sure the agent is stopped even if the test fails.
   525  	defer a.Stop()
   526  	done := make(chan error)
   527  	go func() {
   528  		done <- a.Run(nil)
   529  	}()
   530  
   531  	// See state server runners start
   532  	r0 := s.singularRecord.nextRunner(c)
   533  	r0.waitForWorker(c, "txnpruner")
   534  
   535  	r1 := s.singularRecord.nextRunner(c)
   536  	r1.waitForWorkers(c, perEnvSingularWorkers)
   537  
   538  	// Check that the provisioner and firewaller are alive by doing
   539  	// a rudimentary check that it responds to state changes.
   540  
   541  	// Add one unit to a service; it should get allocated a machine
   542  	// and then its ports should be opened.
   543  	charm := s.AddTestingCharm(c, "dummy")
   544  	svc := s.AddTestingService(c, "test-service", charm)
   545  	err := svc.SetExposed()
   546  	c.Assert(err, jc.ErrorIsNil)
   547  	units, err := juju.AddUnits(s.State, svc, 1, "")
   548  	c.Assert(err, jc.ErrorIsNil)
   549  	c.Check(opRecvTimeout(c, s.State, op, dummy.OpStartInstance{}), gc.NotNil)
   550  
   551  	// Wait for the instance id to show up in the state.
   552  	s.waitProvisioned(c, units[0])
   553  	err = units[0].OpenPort("tcp", 999)
   554  	c.Assert(err, jc.ErrorIsNil)
   555  
   556  	c.Check(opRecvTimeout(c, s.State, op, dummy.OpOpenPorts{}), gc.NotNil)
   557  
   558  	// Check that the metrics workers have started by adding metrics
   559  	select {
   560  	case <-time.After(coretesting.LongWait):
   561  		c.Fatalf("timed out waiting for metric cleanup API to be called")
   562  	case <-s.metricAPI.CleanupCalled():
   563  	}
   564  	select {
   565  	case <-time.After(coretesting.LongWait):
   566  		c.Fatalf("timed out waiting for metric sender API to be called")
   567  	case <-s.metricAPI.SendCalled():
   568  	}
   569  
   570  	err = a.Stop()
   571  	c.Assert(err, jc.ErrorIsNil)
   572  
   573  	select {
   574  	case err := <-done:
   575  		c.Assert(err, jc.ErrorIsNil)
   576  	case <-time.After(5 * time.Second):
   577  		c.Fatalf("timed out waiting for agent to terminate")
   578  	}
   579  }
   580  
   581  func (s *MachineSuite) TestManageEnvironRunsResumer(c *gc.C) {
   582  	started := make(chan struct{})
   583  	s.AgentSuite.PatchValue(&newResumer, func(st resumer.TransactionResumer) *resumer.Resumer {
   584  		close(started)
   585  		return resumer.NewResumer(st)
   586  	})
   587  
   588  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   589  	a := s.newAgent(c, m)
   590  	defer a.Stop()
   591  	go func() {
   592  		c.Check(a.Run(nil), jc.ErrorIsNil)
   593  	}()
   594  
   595  	// Wait for the worker that starts before the resumer to start.
   596  	_ = s.singularRecord.nextRunner(c)
   597  	r := s.singularRecord.nextRunner(c)
   598  	r.waitForWorker(c, "charm-revision-updater")
   599  
   600  	// Now make sure the resumer starts.
   601  	select {
   602  	case <-started:
   603  	case <-time.After(coretesting.LongWait):
   604  		c.Fatalf("resumer worker not started as expected")
   605  	}
   606  }
   607  
   608  func (s *MachineSuite) TestManageEnvironStartsInstancePoller(c *gc.C) {
   609  	started := make(chan struct{})
   610  	s.AgentSuite.PatchValue(&newInstancePoller, func(st *apiinstancepoller.API) worker.Worker {
   611  		close(started)
   612  		return instancepoller.NewWorker(st)
   613  	})
   614  
   615  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   616  	a := s.newAgent(c, m)
   617  	defer a.Stop()
   618  	go func() {
   619  		c.Check(a.Run(nil), jc.ErrorIsNil)
   620  	}()
   621  
   622  	// Wait for the worker that starts before the instancepoller to
   623  	// start.
   624  	_ = s.singularRecord.nextRunner(c)
   625  	r := s.singularRecord.nextRunner(c)
   626  	r.waitForWorker(c, "charm-revision-updater")
   627  
   628  	// Now make sure the resumer starts.
   629  	select {
   630  	case <-started:
   631  	case <-time.After(coretesting.LongWait):
   632  		c.Fatalf("instancepoller worker not started as expected")
   633  	}
   634  }
   635  
   636  const startWorkerWait = 250 * time.Millisecond
   637  
   638  func (s *MachineSuite) TestManageEnvironDoesNotRunFirewallerWhenModeIsNone(c *gc.C) {
   639  	s.PatchValue(&getFirewallMode, func(api.Connection) (string, error) {
   640  		return config.FwNone, nil
   641  	})
   642  	started := make(chan struct{})
   643  	s.AgentSuite.PatchValue(&newFirewaller, func(st *apifirewaller.State) (worker.Worker, error) {
   644  		close(started)
   645  		return newDummyWorker(), nil
   646  	})
   647  
   648  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   649  	a := s.newAgent(c, m)
   650  	defer a.Stop()
   651  	go func() {
   652  		c.Check(a.Run(nil), jc.ErrorIsNil)
   653  	}()
   654  
   655  	// Wait for the worker that starts before the firewaller to start.
   656  	_ = s.singularRecord.nextRunner(c)
   657  	r := s.singularRecord.nextRunner(c)
   658  	r.waitForWorker(c, "charm-revision-updater")
   659  
   660  	// Now make sure the firewaller doesn't start.
   661  	select {
   662  	case <-started:
   663  		c.Fatalf("firewaller worker unexpectedly started")
   664  	case <-time.After(startWorkerWait):
   665  	}
   666  }
   667  
   668  func (s *MachineSuite) TestManageEnvironRunsInstancePoller(c *gc.C) {
   669  	s.AgentSuite.PatchValue(&instancepoller.ShortPoll, 500*time.Millisecond)
   670  	usefulVersion := version.Current
   671  	usefulVersion.Series = "quantal" // to match the charm created below
   672  	envtesting.AssertUploadFakeToolsVersions(
   673  		c, s.DefaultToolsStorage,
   674  		s.Environ.Config().AgentStream(),
   675  		s.Environ.Config().AgentStream(),
   676  		usefulVersion,
   677  	)
   678  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   679  	a := s.newAgent(c, m)
   680  	defer a.Stop()
   681  	go func() {
   682  		c.Check(a.Run(nil), jc.ErrorIsNil)
   683  	}()
   684  
   685  	// Add one unit to a service;
   686  	charm := s.AddTestingCharm(c, "dummy")
   687  	svc := s.AddTestingService(c, "test-service", charm)
   688  	units, err := juju.AddUnits(s.State, svc, 1, "")
   689  	c.Assert(err, jc.ErrorIsNil)
   690  
   691  	m, instId := s.waitProvisioned(c, units[0])
   692  	insts, err := s.Environ.Instances([]instance.Id{instId})
   693  	c.Assert(err, jc.ErrorIsNil)
   694  	addrs := network.NewAddresses("1.2.3.4")
   695  	dummy.SetInstanceAddresses(insts[0], addrs)
   696  	dummy.SetInstanceStatus(insts[0], "running")
   697  
   698  	for a := coretesting.LongAttempt.Start(); a.Next(); {
   699  		if !a.HasNext() {
   700  			c.Logf("final machine addresses: %#v", m.Addresses())
   701  			c.Fatalf("timed out waiting for machine to get address")
   702  		}
   703  		err := m.Refresh()
   704  		c.Assert(err, jc.ErrorIsNil)
   705  		instStatus, err := m.InstanceStatus()
   706  		c.Assert(err, jc.ErrorIsNil)
   707  		if reflect.DeepEqual(m.Addresses(), addrs) && instStatus == "running" {
   708  			break
   709  		}
   710  	}
   711  }
   712  
   713  func (s *MachineSuite) TestManageEnvironRunsPeergrouper(c *gc.C) {
   714  	started := make(chan struct{}, 1)
   715  	s.AgentSuite.PatchValue(&peergrouperNew, func(st *state.State) (worker.Worker, error) {
   716  		c.Check(st, gc.NotNil)
   717  		select {
   718  		case started <- struct{}{}:
   719  		default:
   720  		}
   721  		return newDummyWorker(), nil
   722  	})
   723  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   724  	a := s.newAgent(c, m)
   725  	defer a.Stop()
   726  	go func() {
   727  		c.Check(a.Run(nil), jc.ErrorIsNil)
   728  	}()
   729  	select {
   730  	case <-started:
   731  	case <-time.After(coretesting.LongWait):
   732  		c.Fatalf("timed out waiting for peergrouper worker to be started")
   733  	}
   734  }
   735  
   736  func (s *MachineSuite) testAddresserNewWorkerResult(c *gc.C, expectFinished bool) {
   737  	// TODO(dimitern): Fix this in a follow-up.
   738  	c.Skip("Test temporarily disabled as flaky - see bug lp:1488576")
   739  
   740  	started := make(chan struct{})
   741  	s.PatchValue(&newAddresser, func(api *apiaddresser.API) (worker.Worker, error) {
   742  		close(started)
   743  		w, err := addresser.NewWorker(api)
   744  		c.Check(err, jc.ErrorIsNil)
   745  		if expectFinished {
   746  			// When the address-allocation feature flag is disabled.
   747  			c.Check(w, gc.FitsTypeOf, worker.FinishedWorker{})
   748  		} else {
   749  			// When the address-allocation feature flag is enabled.
   750  			c.Check(w, gc.Not(gc.FitsTypeOf), worker.FinishedWorker{})
   751  		}
   752  		return w, err
   753  	})
   754  
   755  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   756  	a := s.newAgent(c, m)
   757  	defer a.Stop()
   758  	go func() {
   759  		c.Check(a.Run(nil), jc.ErrorIsNil)
   760  	}()
   761  
   762  	// Wait for the worker that starts before the addresser to start.
   763  	_ = s.singularRecord.nextRunner(c)
   764  	r := s.singularRecord.nextRunner(c)
   765  	r.waitForWorker(c, "cleaner")
   766  
   767  	select {
   768  	case <-started:
   769  	case <-time.After(coretesting.LongWait):
   770  		c.Fatalf("timed out waiting for addresser to start")
   771  	}
   772  }
   773  
   774  func (s *MachineSuite) TestAddresserWorkerDoesNotStopWhenAddressDeallocationSupported(c *gc.C) {
   775  	s.SetFeatureFlags(feature.AddressAllocation)
   776  	s.testAddresserNewWorkerResult(c, false)
   777  }
   778  
   779  func (s *MachineSuite) TestAddresserWorkerStopsWhenAddressDeallocationNotSupported(c *gc.C) {
   780  	s.SetFeatureFlags()
   781  	s.testAddresserNewWorkerResult(c, true)
   782  }
   783  
   784  func (s *MachineSuite) TestManageEnvironRunsDbLogPrunerIfFeatureFlagEnabled(c *gc.C) {
   785  	s.SetFeatureFlags("db-log")
   786  
   787  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   788  	a := s.newAgent(c, m)
   789  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
   790  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
   791  
   792  	runner := s.singularRecord.nextRunner(c)
   793  	runner.waitForWorker(c, "dblogpruner")
   794  }
   795  
   796  func (s *MachineSuite) TestManageEnvironDoesntRunDbLogPrunerByDefault(c *gc.C) {
   797  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   798  	a := s.newAgent(c, m)
   799  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
   800  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
   801  
   802  	// Wait for the txnpruner to be started. This is started just after
   803  	// dblogpruner would be started.
   804  	runner := s.singularRecord.nextRunner(c)
   805  	started := set.NewStrings(runner.waitForWorker(c, "txnpruner")...)
   806  	c.Assert(started.Contains("dblogpruner"), jc.IsFalse)
   807  }
   808  
   809  func (s *MachineSuite) TestManageEnvironRunsStatusHistoryPruner(c *gc.C) {
   810  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   811  	a := s.newAgent(c, m)
   812  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
   813  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
   814  
   815  	runner := s.singularRecord.nextRunner(c)
   816  	runner.waitForWorker(c, "statushistorypruner")
   817  }
   818  
   819  func (s *MachineSuite) TestManageEnvironCallsUseMultipleCPUs(c *gc.C) {
   820  	// If it has been enabled, the JobManageEnviron agent should call utils.UseMultipleCPUs
   821  	usefulVersion := version.Current
   822  	usefulVersion.Series = "quantal"
   823  	envtesting.AssertUploadFakeToolsVersions(
   824  		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
   825  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
   826  	calledChan := make(chan struct{}, 1)
   827  	s.AgentSuite.PatchValue(&useMultipleCPUs, func() { calledChan <- struct{}{} })
   828  	// Now, start the agent, and observe that a JobManageEnviron agent
   829  	// calls UseMultipleCPUs
   830  	a := s.newAgent(c, m)
   831  	defer a.Stop()
   832  	go func() {
   833  		c.Check(a.Run(nil), jc.ErrorIsNil)
   834  	}()
   835  	// Wait for configuration to be finished
   836  	<-a.WorkersStarted()
   837  	select {
   838  	case <-calledChan:
   839  	case <-time.After(coretesting.LongWait):
   840  		c.Errorf("we failed to call UseMultipleCPUs()")
   841  	}
   842  	c.Check(a.Stop(), jc.ErrorIsNil)
   843  	// However, an agent that just JobHostUnits doesn't call UseMultipleCPUs
   844  	m2, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
   845  	a2 := s.newAgent(c, m2)
   846  	defer a2.Stop()
   847  	go func() {
   848  		c.Check(a2.Run(nil), jc.ErrorIsNil)
   849  	}()
   850  	// Wait until all the workers have been started, and then kill everything
   851  	<-a2.workersStarted
   852  	c.Check(a2.Stop(), jc.ErrorIsNil)
   853  	select {
   854  	case <-calledChan:
   855  		c.Errorf("we should not have called UseMultipleCPUs()")
   856  	case <-time.After(coretesting.ShortWait):
   857  	}
   858  }
   859  
   860  func (s *MachineSuite) waitProvisioned(c *gc.C, unit *state.Unit) (*state.Machine, instance.Id) {
   861  	c.Logf("waiting for unit %q to be provisioned", unit)
   862  	machineId, err := unit.AssignedMachineId()
   863  	c.Assert(err, jc.ErrorIsNil)
   864  	m, err := s.State.Machine(machineId)
   865  	c.Assert(err, jc.ErrorIsNil)
   866  	w := m.Watch()
   867  	defer w.Stop()
   868  	timeout := time.After(coretesting.LongWait)
   869  	for {
   870  		select {
   871  		case <-timeout:
   872  			c.Fatalf("timed out waiting for provisioning")
   873  		case _, ok := <-w.Changes():
   874  			c.Assert(ok, jc.IsTrue)
   875  			err := m.Refresh()
   876  			c.Assert(err, jc.ErrorIsNil)
   877  			if instId, err := m.InstanceId(); err == nil {
   878  				c.Logf("unit provisioned with instance %s", instId)
   879  				return m, instId
   880  			} else {
   881  				c.Check(err, jc.Satisfies, errors.IsNotProvisioned)
   882  			}
   883  		}
   884  	}
   885  }
   886  
   887  func (s *MachineSuite) testUpgradeRequest(c *gc.C, agent runner, tag string, currentTools *tools.Tools) {
   888  	newVers := version.Current
   889  	newVers.Patch++
   890  	newTools := envtesting.AssertUploadFakeToolsVersions(
   891  		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), newVers)[0]
   892  	err := s.State.SetEnvironAgentVersion(newVers.Number)
   893  	c.Assert(err, jc.ErrorIsNil)
   894  	err = runWithTimeout(agent)
   895  	envtesting.CheckUpgraderReadyError(c, err, &upgrader.UpgradeReadyError{
   896  		AgentName: tag,
   897  		OldTools:  currentTools.Version,
   898  		NewTools:  newTools.Version,
   899  		DataDir:   s.DataDir(),
   900  	})
   901  }
   902  
   903  func (s *MachineSuite) TestUpgradeRequest(c *gc.C) {
   904  	m, _, currentTools := s.primeAgent(c, version.Current, state.JobManageEnviron, state.JobHostUnits)
   905  	a := s.newAgent(c, m)
   906  	s.testUpgradeRequest(c, a, m.Tag().String(), currentTools)
   907  	c.Assert(a.isAgentUpgradePending(), jc.IsTrue)
   908  }
   909  
   910  func (s *MachineSuite) TestNoUpgradeRequired(c *gc.C) {
   911  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron, state.JobHostUnits)
   912  	a := s.newAgent(c, m)
   913  	done := make(chan error)
   914  	go func() { done <- a.Run(nil) }()
   915  	select {
   916  	case <-a.initialAgentUpgradeCheckComplete:
   917  	case <-time.After(coretesting.LongWait):
   918  		c.Fatalf("timeout waiting for upgrade check")
   919  	}
   920  	defer a.Stop() // in case of failure
   921  	s.waitStopped(c, state.JobManageEnviron, a, done)
   922  	c.Assert(a.isAgentUpgradePending(), jc.IsFalse)
   923  }
   924  
   925  var fastDialOpts = api.DialOpts{
   926  	Timeout:    coretesting.LongWait,
   927  	RetryDelay: coretesting.ShortWait,
   928  }
   929  
   930  func (s *MachineSuite) waitStopped(c *gc.C, job state.MachineJob, a *MachineAgent, done chan error) {
   931  	err := a.Stop()
   932  	if job == state.JobManageEnviron {
   933  		// When shutting down, the API server can be shut down before
   934  		// the other workers that connect to it, so they get an error so
   935  		// they then die, causing Stop to return an error.  It's not
   936  		// easy to control the actual error that's received in this
   937  		// circumstance so we just log it rather than asserting that it
   938  		// is not nil.
   939  		if err != nil {
   940  			c.Logf("error shutting down state manager: %v", err)
   941  		}
   942  	} else {
   943  		c.Assert(err, jc.ErrorIsNil)
   944  	}
   945  
   946  	select {
   947  	case err := <-done:
   948  		c.Assert(err, jc.ErrorIsNil)
   949  	case <-time.After(5 * time.Second):
   950  		c.Fatalf("timed out waiting for agent to terminate")
   951  	}
   952  }
   953  
   954  func (s *MachineSuite) assertJobWithAPI(
   955  	c *gc.C,
   956  	job state.MachineJob,
   957  	test func(agent.Config, api.Connection),
   958  ) {
   959  	s.assertAgentOpensState(c, &reportOpenedAPI, job, func(cfg agent.Config, st interface{}) {
   960  		test(cfg, st.(api.Connection))
   961  	})
   962  }
   963  
   964  func (s *MachineSuite) assertJobWithState(
   965  	c *gc.C,
   966  	job state.MachineJob,
   967  	test func(agent.Config, *state.State),
   968  ) {
   969  	paramsJob := job.ToParams()
   970  	if !paramsJob.NeedsState() {
   971  		c.Fatalf("%v does not use state", paramsJob)
   972  	}
   973  	s.assertAgentOpensState(c, &reportOpenedState, job, func(cfg agent.Config, st interface{}) {
   974  		test(cfg, st.(*state.State))
   975  	})
   976  }
   977  
   978  // assertAgentOpensState asserts that a machine agent started with the
   979  // given job will call the function pointed to by reportOpened. The
   980  // agent's configuration and the value passed to reportOpened are then
   981  // passed to the test function for further checking.
   982  func (s *MachineSuite) assertAgentOpensState(
   983  	c *gc.C,
   984  	reportOpened *func(io.Closer),
   985  	job state.MachineJob,
   986  	test func(agent.Config, interface{}),
   987  ) {
   988  	stm, conf, _ := s.primeAgent(c, version.Current, job)
   989  	a := s.newAgent(c, stm)
   990  	defer a.Stop()
   991  	logger.Debugf("new agent %#v", a)
   992  
   993  	// All state jobs currently also run an APIWorker, so no
   994  	// need to check for that here, like in assertJobWithState.
   995  
   996  	agentAPIs := make(chan io.Closer, 1)
   997  	s.AgentSuite.PatchValue(reportOpened, func(st io.Closer) {
   998  		select {
   999  		case agentAPIs <- st:
  1000  		default:
  1001  		}
  1002  	})
  1003  
  1004  	done := make(chan error)
  1005  	go func() {
  1006  		done <- a.Run(nil)
  1007  	}()
  1008  
  1009  	select {
  1010  	case agentAPI := <-agentAPIs:
  1011  		c.Assert(agentAPI, gc.NotNil)
  1012  		test(conf, agentAPI)
  1013  	case <-time.After(coretesting.LongWait):
  1014  		c.Fatalf("API not opened")
  1015  	}
  1016  
  1017  	s.waitStopped(c, job, a, done)
  1018  }
  1019  
  1020  func (s *MachineSuite) TestManageEnvironServesAPI(c *gc.C) {
  1021  	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
  1022  		st, err := api.Open(conf.APIInfo(), fastDialOpts)
  1023  		c.Assert(err, jc.ErrorIsNil)
  1024  		defer st.Close()
  1025  		m, err := st.Machiner().Machine(conf.Tag().(names.MachineTag))
  1026  		c.Assert(err, jc.ErrorIsNil)
  1027  		c.Assert(m.Life(), gc.Equals, params.Alive)
  1028  	})
  1029  }
  1030  
  1031  func (s *MachineSuite) assertAgentSetsToolsVersion(c *gc.C, job state.MachineJob) {
  1032  	vers := version.Current
  1033  	vers.Minor = version.Current.Minor + 1
  1034  	m, _, _ := s.primeAgent(c, vers, job)
  1035  	a := s.newAgent(c, m)
  1036  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1037  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1038  
  1039  	timeout := time.After(coretesting.LongWait)
  1040  	for done := false; !done; {
  1041  		select {
  1042  		case <-timeout:
  1043  			c.Fatalf("timeout while waiting for agent version to be set")
  1044  		case <-time.After(coretesting.ShortWait):
  1045  			c.Log("Refreshing")
  1046  			err := m.Refresh()
  1047  			c.Assert(err, jc.ErrorIsNil)
  1048  			c.Log("Fetching agent tools")
  1049  			agentTools, err := m.AgentTools()
  1050  			c.Assert(err, jc.ErrorIsNil)
  1051  			c.Logf("(%v vs. %v)", agentTools.Version, version.Current)
  1052  			if agentTools.Version.Minor != version.Current.Minor {
  1053  				continue
  1054  			}
  1055  			c.Assert(agentTools.Version, gc.DeepEquals, version.Current)
  1056  			done = true
  1057  		}
  1058  	}
  1059  }
  1060  
  1061  func (s *MachineSuite) TestAgentSetsToolsVersionManageEnviron(c *gc.C) {
  1062  	s.assertAgentSetsToolsVersion(c, state.JobManageEnviron)
  1063  }
  1064  
  1065  func (s *MachineSuite) TestAgentSetsToolsVersionHostUnits(c *gc.C) {
  1066  	s.assertAgentSetsToolsVersion(c, state.JobHostUnits)
  1067  }
  1068  
  1069  func (s *MachineSuite) TestManageEnvironRunsCleaner(c *gc.C) {
  1070  	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
  1071  		// Create a service and unit, and destroy the service.
  1072  		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
  1073  		unit, err := service.AddUnit()
  1074  		c.Assert(err, jc.ErrorIsNil)
  1075  		err = service.Destroy()
  1076  		c.Assert(err, jc.ErrorIsNil)
  1077  
  1078  		// Check the unit was not yet removed.
  1079  		err = unit.Refresh()
  1080  		c.Assert(err, jc.ErrorIsNil)
  1081  		w := unit.Watch()
  1082  		defer w.Stop()
  1083  
  1084  		// Trigger a sync on the state used by the agent, and wait
  1085  		// for the unit to be removed.
  1086  		agentState.StartSync()
  1087  		timeout := time.After(coretesting.LongWait)
  1088  		for done := false; !done; {
  1089  			select {
  1090  			case <-timeout:
  1091  				c.Fatalf("unit not cleaned up")
  1092  			case <-time.After(coretesting.ShortWait):
  1093  				s.State.StartSync()
  1094  			case <-w.Changes():
  1095  				err := unit.Refresh()
  1096  				if errors.IsNotFound(err) {
  1097  					done = true
  1098  				} else {
  1099  					c.Assert(err, jc.ErrorIsNil)
  1100  				}
  1101  			}
  1102  		}
  1103  	})
  1104  }
  1105  
  1106  func (s *MachineSuite) TestJobManageEnvironRunsMinUnitsWorker(c *gc.C) {
  1107  	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
  1108  		// Ensure that the MinUnits worker is alive by doing a simple check
  1109  		// that it responds to state changes: add a service, set its minimum
  1110  		// number of units to one, wait for the worker to add the missing unit.
  1111  		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
  1112  		err := service.SetMinUnits(1)
  1113  		c.Assert(err, jc.ErrorIsNil)
  1114  		w := service.Watch()
  1115  		defer w.Stop()
  1116  
  1117  		// Trigger a sync on the state used by the agent, and wait for the unit
  1118  		// to be created.
  1119  		agentState.StartSync()
  1120  		timeout := time.After(coretesting.LongWait)
  1121  		for {
  1122  			select {
  1123  			case <-timeout:
  1124  				c.Fatalf("unit not created")
  1125  			case <-time.After(coretesting.ShortWait):
  1126  				s.State.StartSync()
  1127  			case <-w.Changes():
  1128  				units, err := service.AllUnits()
  1129  				c.Assert(err, jc.ErrorIsNil)
  1130  				if len(units) == 1 {
  1131  					return
  1132  				}
  1133  			}
  1134  		}
  1135  	})
  1136  }
  1137  
  1138  func (s *MachineSuite) TestMachineAgentRunsAuthorisedKeysWorker(c *gc.C) {
  1139  	//TODO(bogdanteleaga): Fix once we get authentication worker up on windows
  1140  	if runtime.GOOS == "windows" {
  1141  		c.Skip("bug 1403084: authentication worker not yet implemented on windows")
  1142  	}
  1143  	// Start the machine agent.
  1144  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1145  	a := s.newAgent(c, m)
  1146  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1147  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1148  
  1149  	// Update the keys in the environment.
  1150  	sshKey := sshtesting.ValidKeyOne.Key + " user@host"
  1151  	err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": sshKey}, nil, nil)
  1152  	c.Assert(err, jc.ErrorIsNil)
  1153  
  1154  	// Wait for ssh keys file to be updated.
  1155  	s.State.StartSync()
  1156  	timeout := time.After(coretesting.LongWait)
  1157  	sshKeyWithCommentPrefix := sshtesting.ValidKeyOne.Key + " Juju:user@host"
  1158  	for {
  1159  		select {
  1160  		case <-timeout:
  1161  			c.Fatalf("timeout while waiting for authorised ssh keys to change")
  1162  		case <-time.After(coretesting.ShortWait):
  1163  			keys, err := ssh.ListKeys(authenticationworker.SSHUser, ssh.FullKeys)
  1164  			c.Assert(err, jc.ErrorIsNil)
  1165  			keysStr := strings.Join(keys, "\n")
  1166  			if sshKeyWithCommentPrefix != keysStr {
  1167  				continue
  1168  			}
  1169  			return
  1170  		}
  1171  	}
  1172  }
  1173  
  1174  // opRecvTimeout waits for any of the given kinds of operation to
  1175  // be received from ops, and times out if not.
  1176  func opRecvTimeout(c *gc.C, st *state.State, opc <-chan dummy.Operation, kinds ...dummy.Operation) dummy.Operation {
  1177  	st.StartSync()
  1178  	for {
  1179  		select {
  1180  		case op := <-opc:
  1181  			for _, k := range kinds {
  1182  				if reflect.TypeOf(op) == reflect.TypeOf(k) {
  1183  					return op
  1184  				}
  1185  			}
  1186  			c.Logf("discarding unknown event %#v", op)
  1187  		case <-time.After(15 * time.Second):
  1188  			c.Fatalf("time out wating for operation")
  1189  		}
  1190  	}
  1191  }
  1192  
  1193  func (s *MachineSuite) TestOpenStateFailsForJobHostUnits(c *gc.C) {
  1194  	s.assertJobWithAPI(c, state.JobHostUnits, func(conf agent.Config, st api.Connection) {
  1195  		s.AssertCannotOpenState(c, conf.Tag(), conf.DataDir())
  1196  	})
  1197  }
  1198  
  1199  func (s *MachineSuite) TestOpenStateFailsForJobManageNetworking(c *gc.C) {
  1200  	s.assertJobWithAPI(c, state.JobManageNetworking, func(conf agent.Config, st api.Connection) {
  1201  		s.AssertCannotOpenState(c, conf.Tag(), conf.DataDir())
  1202  	})
  1203  }
  1204  
  1205  func (s *MachineSuite) TestOpenStateWorksForJobManageEnviron(c *gc.C) {
  1206  	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st api.Connection) {
  1207  		s.AssertCanOpenState(c, conf.Tag(), conf.DataDir())
  1208  	})
  1209  }
  1210  
  1211  func (s *MachineSuite) TestOpenAPIStateWorksForJobHostUnits(c *gc.C) {
  1212  	machine, conf, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1213  	s.runOpenAPISTateTest(c, machine, conf)
  1214  }
  1215  
  1216  func (s *MachineSuite) TestOpenAPIStateWorksForJobManageNetworking(c *gc.C) {
  1217  	machine, conf, _ := s.primeAgent(c, version.Current, state.JobManageNetworking)
  1218  	s.runOpenAPISTateTest(c, machine, conf)
  1219  }
  1220  
  1221  func (s *MachineSuite) TestOpenAPIStateWorksForJobManageEnviron(c *gc.C) {
  1222  	machine, conf, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1223  	s.runOpenAPISTateTest(c, machine, conf)
  1224  }
  1225  
  1226  func (s *MachineSuite) runOpenAPISTateTest(c *gc.C, machine *state.Machine, conf agent.Config) {
  1227  	configPath := agent.ConfigPath(conf.DataDir(), conf.Tag())
  1228  
  1229  	// Set a failing password...
  1230  	confW, err := agent.ReadConfig(configPath)
  1231  	c.Assert(err, jc.ErrorIsNil)
  1232  	confW.SetPassword("not-set-on-state-server")
  1233  
  1234  	// ...and also make sure the api info points to the testing api
  1235  	// server (and not, as for JobManageEnviron machines, to the port
  1236  	// chosen for the agent's own API server to run on. This is usually
  1237  	// sane, but inconvenient here because we're not running the full
  1238  	// agent and so the configured API server is not actually there).
  1239  	apiInfo := s.APIInfo(c)
  1240  	hostPorts, err := network.ParseHostPorts(apiInfo.Addrs...)
  1241  	c.Assert(err, jc.ErrorIsNil)
  1242  	confW.SetAPIHostPorts([][]network.HostPort{hostPorts})
  1243  	err = confW.Write()
  1244  	c.Assert(err, jc.ErrorIsNil)
  1245  
  1246  	// Check that it successfully connects with the conf's old password.
  1247  	assertOpen := func() {
  1248  		tagString := conf.Tag().String()
  1249  		agent := NewAgentConf(conf.DataDir())
  1250  		err := agent.ReadConfig(tagString)
  1251  		c.Assert(err, jc.ErrorIsNil)
  1252  		st, gotEntity, err := apicaller.OpenAPIState(agent)
  1253  		c.Assert(err, jc.ErrorIsNil)
  1254  		c.Assert(st, gc.NotNil)
  1255  		st.Close()
  1256  		c.Assert(gotEntity.Tag(), gc.Equals, tagString)
  1257  	}
  1258  	assertOpen()
  1259  
  1260  	// Check that the initial password is no longer valid.
  1261  	assertPassword := func(password string, valid bool) {
  1262  		err := machine.Refresh()
  1263  		c.Assert(err, jc.ErrorIsNil)
  1264  		c.Check(machine.PasswordValid(password), gc.Equals, valid)
  1265  	}
  1266  	assertPassword(initialMachinePassword, false)
  1267  
  1268  	// Read the configuration and check that we can connect with it.
  1269  	confR, err := agent.ReadConfig(configPath)
  1270  	c.Assert(err, gc.IsNil)
  1271  	newPassword := confR.APIInfo().Password
  1272  	assertPassword(newPassword, true)
  1273  
  1274  	// Double-check that we can open a fresh connection with the stored
  1275  	// conf ... and that the password hasn't been changed again.
  1276  	assertOpen()
  1277  	assertPassword(newPassword, true)
  1278  }
  1279  
  1280  func (s *MachineSuite) TestMachineAgentSymlinkJujuRun(c *gc.C) {
  1281  	_, err := os.Stat(JujuRun)
  1282  	c.Assert(err, jc.Satisfies, os.IsNotExist)
  1283  	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st api.Connection) {
  1284  		// juju-run should have been created
  1285  		_, err := os.Stat(JujuRun)
  1286  		c.Assert(err, jc.ErrorIsNil)
  1287  	})
  1288  }
  1289  
  1290  func (s *MachineSuite) TestMachineAgentSymlinkJujuRunExists(c *gc.C) {
  1291  	if runtime.GOOS == "windows" {
  1292  		// Cannot make symlink to nonexistent file on windows or
  1293  		// create a file point a symlink to it then remove it
  1294  		c.Skip("Cannot test this on windows")
  1295  	}
  1296  	err := symlink.New("/nowhere/special", JujuRun)
  1297  	c.Assert(err, jc.ErrorIsNil)
  1298  	_, err = os.Stat(JujuRun)
  1299  	c.Assert(err, jc.Satisfies, os.IsNotExist)
  1300  	s.assertJobWithAPI(c, state.JobManageEnviron, func(conf agent.Config, st api.Connection) {
  1301  		// juju-run should have been recreated
  1302  		_, err := os.Stat(JujuRun)
  1303  		c.Assert(err, jc.ErrorIsNil)
  1304  		link, err := symlink.Read(JujuRun)
  1305  		c.Assert(err, jc.ErrorIsNil)
  1306  		c.Assert(link, gc.Not(gc.Equals), "/nowhere/special")
  1307  	})
  1308  }
  1309  
  1310  func (s *MachineSuite) TestProxyUpdater(c *gc.C) {
  1311  	s.assertProxyUpdater(c, true)
  1312  	s.assertProxyUpdater(c, false)
  1313  }
  1314  
  1315  func (s *MachineSuite) assertProxyUpdater(c *gc.C, expectWriteSystemFiles bool) {
  1316  	// Patch out the func that decides whether we should write system files.
  1317  	var gotConf agent.Config
  1318  	s.AgentSuite.PatchValue(&shouldWriteProxyFiles, func(conf agent.Config) bool {
  1319  		gotConf = conf
  1320  		return expectWriteSystemFiles
  1321  	})
  1322  
  1323  	// Make sure there are some proxy settings to write.
  1324  	expectSettings := proxy.Settings{
  1325  		Http:  "http proxy",
  1326  		Https: "https proxy",
  1327  		Ftp:   "ftp proxy",
  1328  	}
  1329  	updateAttrs := config.ProxyConfigMap(expectSettings)
  1330  	err := s.State.UpdateEnvironConfig(updateAttrs, nil, nil)
  1331  	c.Assert(err, jc.ErrorIsNil)
  1332  
  1333  	// Patch out the actual worker func.
  1334  	started := make(chan struct{})
  1335  	mockNew := func(api *apienvironment.Facade, writeSystemFiles bool) worker.Worker {
  1336  		// Direct check of the behaviour flag.
  1337  		c.Check(writeSystemFiles, gc.Equals, expectWriteSystemFiles)
  1338  		// Indirect check that we get a functional API.
  1339  		conf, err := api.EnvironConfig()
  1340  		if c.Check(err, jc.ErrorIsNil) {
  1341  			actualSettings := conf.ProxySettings()
  1342  			c.Check(actualSettings, jc.DeepEquals, expectSettings)
  1343  		}
  1344  		return worker.NewSimpleWorker(func(_ <-chan struct{}) error {
  1345  			close(started)
  1346  			return nil
  1347  		})
  1348  	}
  1349  	s.AgentSuite.PatchValue(&proxyupdater.New, mockNew)
  1350  
  1351  	s.primeAgent(c, version.Current, state.JobHostUnits)
  1352  	s.assertJobWithAPI(c, state.JobHostUnits, func(conf agent.Config, st api.Connection) {
  1353  		for {
  1354  			select {
  1355  			case <-time.After(coretesting.LongWait):
  1356  				c.Fatalf("timeout while waiting for proxy updater to start")
  1357  			case <-started:
  1358  				c.Assert(gotConf, jc.DeepEquals, conf)
  1359  				return
  1360  			}
  1361  		}
  1362  	})
  1363  }
  1364  
  1365  func (s *MachineSuite) TestMachineAgentUninstall(c *gc.C) {
  1366  	m, ac, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1367  	err := m.EnsureDead()
  1368  	c.Assert(err, jc.ErrorIsNil)
  1369  	a := s.newAgent(c, m)
  1370  	err = runWithTimeout(a)
  1371  	c.Assert(err, jc.ErrorIsNil)
  1372  	// juju-run should have been removed on termination
  1373  	_, err = os.Stat(JujuRun)
  1374  	c.Assert(err, jc.Satisfies, os.IsNotExist)
  1375  	// data-dir should have been removed on termination
  1376  	_, err = os.Stat(ac.DataDir())
  1377  	c.Assert(err, jc.Satisfies, os.IsNotExist)
  1378  }
  1379  
  1380  func (s *MachineSuite) TestMachineAgentRsyslogManageEnviron(c *gc.C) {
  1381  	s.testMachineAgentRsyslogConfigWorker(c, state.JobManageEnviron, rsyslog.RsyslogModeAccumulate)
  1382  }
  1383  
  1384  func (s *MachineSuite) TestMachineAgentRsyslogHostUnits(c *gc.C) {
  1385  	s.testMachineAgentRsyslogConfigWorker(c, state.JobHostUnits, rsyslog.RsyslogModeForwarding)
  1386  }
  1387  
  1388  func (s *MachineSuite) testMachineAgentRsyslogConfigWorker(c *gc.C, job state.MachineJob, expectedMode rsyslog.RsyslogMode) {
  1389  	created := make(chan rsyslog.RsyslogMode, 1)
  1390  	s.AgentSuite.PatchValue(&cmdutil.NewRsyslogConfigWorker, func(_ *apirsyslog.State, _ agent.Config, mode rsyslog.RsyslogMode) (worker.Worker, error) {
  1391  		created <- mode
  1392  		return newDummyWorker(), nil
  1393  	})
  1394  	s.assertJobWithAPI(c, job, func(conf agent.Config, st api.Connection) {
  1395  		select {
  1396  		case <-time.After(coretesting.LongWait):
  1397  			c.Fatalf("timeout while waiting for rsyslog worker to be created")
  1398  		case mode := <-created:
  1399  			c.Assert(mode, gc.Equals, expectedMode)
  1400  		}
  1401  	})
  1402  }
  1403  
  1404  func (s *MachineSuite) TestMachineAgentRunsAPIAddressUpdaterWorker(c *gc.C) {
  1405  	// Start the machine agent.
  1406  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1407  	a := s.newAgent(c, m)
  1408  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1409  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1410  
  1411  	// Update the API addresses.
  1412  	updatedServers := [][]network.HostPort{
  1413  		network.NewHostPorts(1234, "localhost"),
  1414  	}
  1415  	err := s.BackingState.SetAPIHostPorts(updatedServers)
  1416  	c.Assert(err, jc.ErrorIsNil)
  1417  
  1418  	// Wait for config to be updated.
  1419  	s.BackingState.StartSync()
  1420  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
  1421  		addrs, err := a.CurrentConfig().APIAddresses()
  1422  		c.Assert(err, jc.ErrorIsNil)
  1423  		if reflect.DeepEqual(addrs, []string{"localhost:1234"}) {
  1424  			return
  1425  		}
  1426  	}
  1427  	c.Fatalf("timeout while waiting for agent config to change")
  1428  }
  1429  
  1430  func (s *MachineSuite) TestMachineAgentRunsDiskManagerWorker(c *gc.C) {
  1431  	// Patch out the worker func before starting the agent.
  1432  	started := make(chan struct{})
  1433  	newWorker := func(diskmanager.ListBlockDevicesFunc, diskmanager.BlockDeviceSetter) worker.Worker {
  1434  		close(started)
  1435  		return worker.NewNoOpWorker()
  1436  	}
  1437  	s.PatchValue(&newDiskManager, newWorker)
  1438  
  1439  	// Start the machine agent.
  1440  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1441  	a := s.newAgent(c, m)
  1442  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1443  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1444  
  1445  	// Wait for worker to be started.
  1446  	select {
  1447  	case <-started:
  1448  	case <-time.After(coretesting.LongWait):
  1449  		c.Fatalf("timeout while waiting for diskmanager worker to start")
  1450  	}
  1451  }
  1452  
  1453  func (s *MachineSuite) TestDiskManagerWorkerUpdatesState(c *gc.C) {
  1454  	expected := []storage.BlockDevice{{DeviceName: "whatever"}}
  1455  	s.PatchValue(&diskmanager.DefaultListBlockDevices, func() ([]storage.BlockDevice, error) {
  1456  		return expected, nil
  1457  	})
  1458  
  1459  	// Start the machine agent.
  1460  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1461  	a := s.newAgent(c, m)
  1462  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1463  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1464  
  1465  	// Wait for state to be updated.
  1466  	s.BackingState.StartSync()
  1467  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
  1468  		devices, err := s.BackingState.BlockDevices(m.MachineTag())
  1469  		c.Assert(err, jc.ErrorIsNil)
  1470  		if len(devices) > 0 {
  1471  			c.Assert(devices, gc.HasLen, 1)
  1472  			c.Assert(devices[0].DeviceName, gc.Equals, expected[0].DeviceName)
  1473  			return
  1474  		}
  1475  	}
  1476  	c.Fatalf("timeout while waiting for block devices to be recorded")
  1477  }
  1478  
  1479  func (s *MachineSuite) TestMachineAgentRunsMachineStorageWorker(c *gc.C) {
  1480  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1481  
  1482  	started := make(chan struct{})
  1483  	newWorker := func(
  1484  		scope names.Tag,
  1485  		storageDir string,
  1486  		_ storageprovisioner.VolumeAccessor,
  1487  		_ storageprovisioner.FilesystemAccessor,
  1488  		_ storageprovisioner.LifecycleManager,
  1489  		_ storageprovisioner.EnvironAccessor,
  1490  		_ storageprovisioner.MachineAccessor,
  1491  		_ storageprovisioner.StatusSetter,
  1492  		_ clock.Clock,
  1493  	) worker.Worker {
  1494  		c.Check(scope, gc.Equals, m.Tag())
  1495  		// storageDir is not empty for machine scoped storage provisioners
  1496  		c.Assert(storageDir, gc.Not(gc.Equals), "")
  1497  		close(started)
  1498  		return worker.NewNoOpWorker()
  1499  	}
  1500  	s.PatchValue(&newStorageWorker, newWorker)
  1501  
  1502  	// Start the machine agent.
  1503  	a := s.newAgent(c, m)
  1504  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1505  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1506  
  1507  	// Wait for worker to be started.
  1508  	select {
  1509  	case <-started:
  1510  	case <-time.After(coretesting.LongWait):
  1511  		c.Fatalf("timeout while waiting for storage worker to start")
  1512  	}
  1513  }
  1514  
  1515  func (s *MachineSuite) TestMachineAgentRunsEnvironStorageWorker(c *gc.C) {
  1516  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1517  
  1518  	var numWorkers, machineWorkers, environWorkers uint32
  1519  	started := make(chan struct{})
  1520  	newWorker := func(
  1521  		scope names.Tag,
  1522  		storageDir string,
  1523  		_ storageprovisioner.VolumeAccessor,
  1524  		_ storageprovisioner.FilesystemAccessor,
  1525  		_ storageprovisioner.LifecycleManager,
  1526  		_ storageprovisioner.EnvironAccessor,
  1527  		_ storageprovisioner.MachineAccessor,
  1528  		_ storageprovisioner.StatusSetter,
  1529  		_ clock.Clock,
  1530  	) worker.Worker {
  1531  		// storageDir is empty for environ storage provisioners
  1532  		if storageDir == "" {
  1533  			c.Check(scope, gc.Equals, s.State.EnvironTag())
  1534  			c.Check(atomic.AddUint32(&environWorkers, 1), gc.Equals, uint32(1))
  1535  			atomic.AddUint32(&numWorkers, 1)
  1536  		}
  1537  		if storageDir != "" {
  1538  			c.Check(scope, gc.Equals, m.Tag())
  1539  			c.Check(atomic.AddUint32(&machineWorkers, 1), gc.Equals, uint32(1))
  1540  			atomic.AddUint32(&numWorkers, 1)
  1541  		}
  1542  		if atomic.LoadUint32(&environWorkers) == 1 && atomic.LoadUint32(&machineWorkers) == 1 {
  1543  			close(started)
  1544  		}
  1545  		return worker.NewNoOpWorker()
  1546  	}
  1547  	s.PatchValue(&newStorageWorker, newWorker)
  1548  
  1549  	// Start the machine agent.
  1550  	a := s.newAgent(c, m)
  1551  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1552  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1553  
  1554  	// Wait for worker to be started.
  1555  	select {
  1556  	case <-started:
  1557  		c.Assert(atomic.LoadUint32(&numWorkers), gc.Equals, uint32(2))
  1558  	case <-time.After(coretesting.LongWait):
  1559  		c.Fatalf("timeout while waiting for storage worker to start")
  1560  	}
  1561  }
  1562  
  1563  func (s *MachineSuite) TestMachineAgentRunsCertificateUpdateWorkerForStateServer(c *gc.C) {
  1564  	started := make(chan struct{})
  1565  	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
  1566  		certupdater.APIHostPortsGetter, certupdater.StateServingInfoSetter, chan params.StateServingInfo,
  1567  	) worker.Worker {
  1568  		close(started)
  1569  		return worker.NewNoOpWorker()
  1570  	}
  1571  	s.PatchValue(&newCertificateUpdater, newUpdater)
  1572  
  1573  	// Start the machine agent.
  1574  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1575  	a := s.newAgent(c, m)
  1576  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1577  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1578  
  1579  	// Wait for worker to be started.
  1580  	select {
  1581  	case <-started:
  1582  	case <-time.After(coretesting.LongWait):
  1583  		c.Fatalf("timeout while waiting for certificate update worker to start")
  1584  	}
  1585  }
  1586  
  1587  func (s *MachineSuite) TestMachineAgentDoesNotRunsCertificateUpdateWorkerForNonStateServer(c *gc.C) {
  1588  	started := make(chan struct{})
  1589  	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
  1590  		certupdater.APIHostPortsGetter, certupdater.StateServingInfoSetter, chan params.StateServingInfo,
  1591  	) worker.Worker {
  1592  		close(started)
  1593  		return worker.NewNoOpWorker()
  1594  	}
  1595  	s.PatchValue(&newCertificateUpdater, newUpdater)
  1596  
  1597  	// Start the machine agent.
  1598  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1599  	a := s.newAgent(c, m)
  1600  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1601  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1602  
  1603  	// Ensure the worker is not started.
  1604  	select {
  1605  	case <-started:
  1606  		c.Fatalf("certificate update worker unexpectedly started")
  1607  	case <-time.After(coretesting.ShortWait):
  1608  	}
  1609  }
  1610  
  1611  func (s *MachineSuite) TestCertificateUpdateWorkerUpdatesCertificate(c *gc.C) {
  1612  	// Set up the machine agent.
  1613  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1614  	a := s.newAgent(c, m)
  1615  	a.ReadConfig(names.NewMachineTag(m.Id()).String())
  1616  
  1617  	// Set up check that certificate has been updated.
  1618  	updated := make(chan struct{})
  1619  	go func() {
  1620  		for {
  1621  			stateInfo, _ := a.CurrentConfig().StateServingInfo()
  1622  			srvCert, err := cert.ParseCert(stateInfo.Cert)
  1623  			c.Assert(err, jc.ErrorIsNil)
  1624  			sanIPs := make([]string, len(srvCert.IPAddresses))
  1625  			for i, ip := range srvCert.IPAddresses {
  1626  				sanIPs[i] = ip.String()
  1627  			}
  1628  			if len(sanIPs) == 1 && sanIPs[0] == "0.1.2.3" {
  1629  				close(updated)
  1630  				break
  1631  			}
  1632  			time.Sleep(10 * time.Millisecond)
  1633  		}
  1634  	}()
  1635  
  1636  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1637  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1638  	// Wait for certificate to be updated.
  1639  	select {
  1640  	case <-updated:
  1641  	case <-time.After(coretesting.LongWait):
  1642  		c.Fatalf("timeout while waiting for certificate to be updated")
  1643  	}
  1644  }
  1645  
  1646  func (s *MachineSuite) TestCertificateDNSUpdated(c *gc.C) {
  1647  	// Disable the certificate work so it doesn't update the certificate.
  1648  	newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.EnvironConfigGetter,
  1649  		certupdater.APIHostPortsGetter, certupdater.StateServingInfoSetter, chan params.StateServingInfo,
  1650  	) worker.Worker {
  1651  		return worker.NewNoOpWorker()
  1652  	}
  1653  	s.PatchValue(&newCertificateUpdater, newUpdater)
  1654  
  1655  	// Set up the machine agent.
  1656  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1657  	a := s.newAgent(c, m)
  1658  
  1659  	// Set up check that certificate has been updated when the agent starts.
  1660  	updated := make(chan struct{})
  1661  	expectedDnsNames := set.NewStrings("local", "juju-apiserver", "juju-mongodb")
  1662  	go func() {
  1663  		for {
  1664  			stateInfo, _ := a.CurrentConfig().StateServingInfo()
  1665  			srvCert, err := cert.ParseCert(stateInfo.Cert)
  1666  			c.Assert(err, jc.ErrorIsNil)
  1667  			certDnsNames := set.NewStrings(srvCert.DNSNames...)
  1668  			if !expectedDnsNames.Difference(certDnsNames).IsEmpty() {
  1669  				continue
  1670  			}
  1671  			pemContent, err := ioutil.ReadFile(filepath.Join(s.DataDir(), "server.pem"))
  1672  			c.Assert(err, jc.ErrorIsNil)
  1673  			if string(pemContent) == stateInfo.Cert+"\n"+stateInfo.PrivateKey {
  1674  				close(updated)
  1675  				break
  1676  			}
  1677  			time.Sleep(10 * time.Millisecond)
  1678  		}
  1679  	}()
  1680  
  1681  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1682  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1683  	// Wait for certificate to be updated.
  1684  	select {
  1685  	case <-updated:
  1686  	case <-time.After(coretesting.LongWait):
  1687  		c.Fatalf("timeout while waiting for certificate to be updated")
  1688  	}
  1689  }
  1690  
  1691  func (s *MachineSuite) TestMachineAgentNetworkerMode(c *gc.C) {
  1692  	tests := []struct {
  1693  		about          string
  1694  		managedNetwork bool
  1695  		jobs           []state.MachineJob
  1696  		intrusiveMode  bool
  1697  	}{{
  1698  		about:          "network management enabled, network management job set",
  1699  		managedNetwork: true,
  1700  		jobs:           []state.MachineJob{state.JobHostUnits, state.JobManageNetworking},
  1701  		intrusiveMode:  true,
  1702  	}, {
  1703  		about:          "network management disabled, network management job set",
  1704  		managedNetwork: false,
  1705  		jobs:           []state.MachineJob{state.JobHostUnits, state.JobManageNetworking},
  1706  		intrusiveMode:  false,
  1707  	}, {
  1708  		about:          "network management enabled, network management job not set",
  1709  		managedNetwork: true,
  1710  		jobs:           []state.MachineJob{state.JobHostUnits},
  1711  		intrusiveMode:  false,
  1712  	}, {
  1713  		about:          "network management disabled, network management job not set",
  1714  		managedNetwork: false,
  1715  		jobs:           []state.MachineJob{state.JobHostUnits},
  1716  		intrusiveMode:  false,
  1717  	}}
  1718  	// Perform tests.
  1719  	for i, test := range tests {
  1720  		c.Logf("test #%d: %s", i, test.about)
  1721  
  1722  		modeCh := make(chan bool, 1)
  1723  		s.AgentSuite.PatchValue(&newNetworker, func(
  1724  			st apinetworker.State,
  1725  			conf agent.Config,
  1726  			intrusiveMode bool,
  1727  			configBaseDir string,
  1728  		) (*networker.Networker, error) {
  1729  			select {
  1730  			case modeCh <- intrusiveMode:
  1731  			default:
  1732  			}
  1733  			return networker.NewNetworker(st, conf, intrusiveMode, configBaseDir)
  1734  		})
  1735  
  1736  		attrs := coretesting.Attrs{"disable-network-management": !test.managedNetwork}
  1737  		err := s.BackingState.UpdateEnvironConfig(attrs, nil, nil)
  1738  		c.Assert(err, jc.ErrorIsNil)
  1739  
  1740  		m, _, _ := s.primeAgent(c, version.Current, test.jobs...)
  1741  		a := s.newAgent(c, m)
  1742  		defer a.Stop()
  1743  		doneCh := make(chan error)
  1744  		go func() {
  1745  			doneCh <- a.Run(nil)
  1746  		}()
  1747  
  1748  		select {
  1749  		case intrusiveMode := <-modeCh:
  1750  			if intrusiveMode != test.intrusiveMode {
  1751  				c.Fatalf("expected networker intrusive mode = %v, got mode = %v", test.intrusiveMode, intrusiveMode)
  1752  			}
  1753  		case <-time.After(coretesting.LongWait):
  1754  			c.Fatalf("timed out waiting for the networker to start")
  1755  		}
  1756  		s.waitStopped(c, state.JobManageNetworking, a, doneCh)
  1757  	}
  1758  }
  1759  
  1760  func (s *MachineSuite) TestMachineAgentIgnoreAddresses(c *gc.C) {
  1761  	for _, expectedIgnoreValue := range []bool{true, false} {
  1762  		ignoreAddressCh := make(chan bool, 1)
  1763  		s.AgentSuite.PatchValue(&newMachiner, func(
  1764  			accessor machiner.MachineAccessor,
  1765  			conf agent.Config,
  1766  			ignoreMachineAddresses bool,
  1767  		) worker.Worker {
  1768  			select {
  1769  			case ignoreAddressCh <- ignoreMachineAddresses:
  1770  			default:
  1771  			}
  1772  			return machiner.NewMachiner(accessor, conf, ignoreMachineAddresses)
  1773  		})
  1774  
  1775  		attrs := coretesting.Attrs{"ignore-machine-addresses": expectedIgnoreValue}
  1776  		err := s.BackingState.UpdateEnvironConfig(attrs, nil, nil)
  1777  		c.Assert(err, jc.ErrorIsNil)
  1778  
  1779  		m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1780  		a := s.newAgent(c, m)
  1781  		defer a.Stop()
  1782  		doneCh := make(chan error)
  1783  		go func() {
  1784  			doneCh <- a.Run(nil)
  1785  		}()
  1786  
  1787  		select {
  1788  		case ignoreMachineAddresses := <-ignoreAddressCh:
  1789  			if ignoreMachineAddresses != expectedIgnoreValue {
  1790  				c.Fatalf("expected ignore-machine-addresses = %v, got = %v", expectedIgnoreValue, ignoreMachineAddresses)
  1791  			}
  1792  		case <-time.After(coretesting.LongWait):
  1793  			c.Fatalf("timed out waiting for the machiner to start")
  1794  		}
  1795  		s.waitStopped(c, state.JobHostUnits, a, doneCh)
  1796  	}
  1797  }
  1798  
  1799  func (s *MachineSuite) TestMachineAgentUpgradeMongo(c *gc.C) {
  1800  	m, agentConfig, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1801  	agentConfig.SetUpgradedToVersion(version.MustParse("1.18.0"))
  1802  	err := agentConfig.Write()
  1803  	c.Assert(err, jc.ErrorIsNil)
  1804  	err = s.State.MongoSession().DB("admin").RemoveUser(m.Tag().String())
  1805  	c.Assert(err, jc.ErrorIsNil)
  1806  
  1807  	s.fakeEnsureMongo.ServiceInstalled = true
  1808  	s.fakeEnsureMongo.ReplicasetInitiated = false
  1809  
  1810  	s.AgentSuite.PatchValue(&ensureMongoAdminUser, func(p mongo.EnsureAdminUserParams) (bool, error) {
  1811  		err := s.State.MongoSession().DB("admin").AddUser(p.User, p.Password, false)
  1812  		c.Assert(err, jc.ErrorIsNil)
  1813  		return true, nil
  1814  	})
  1815  
  1816  	stateOpened := make(chan interface{}, 1)
  1817  	s.AgentSuite.PatchValue(&reportOpenedState, func(st io.Closer) {
  1818  		select {
  1819  		case stateOpened <- st:
  1820  		default:
  1821  		}
  1822  	})
  1823  
  1824  	// Start the machine agent, and wait for state to be opened.
  1825  	a := s.newAgent(c, m)
  1826  	done := make(chan error)
  1827  	go func() { done <- a.Run(nil) }()
  1828  	defer a.Stop() // in case of failure
  1829  	select {
  1830  	case st := <-stateOpened:
  1831  		c.Assert(st, gc.NotNil)
  1832  	case <-time.After(coretesting.LongWait):
  1833  		c.Fatalf("state not opened")
  1834  	}
  1835  	s.waitStopped(c, state.JobManageEnviron, a, done)
  1836  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  1837  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 1)
  1838  }
  1839  
  1840  func (s *MachineSuite) TestMachineAgentSetsPrepareRestore(c *gc.C) {
  1841  	// Start the machine agent.
  1842  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1843  	a := s.newAgent(c, m)
  1844  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1845  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1846  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1847  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1848  	err := a.PrepareRestore()
  1849  	c.Assert(err, jc.ErrorIsNil)
  1850  	c.Assert(a.IsRestorePreparing(), jc.IsTrue)
  1851  	c.Assert(a.IsRestoreRunning(), jc.IsFalse)
  1852  	err = a.PrepareRestore()
  1853  	c.Assert(err, gc.ErrorMatches, "already in restore mode")
  1854  }
  1855  
  1856  func (s *MachineSuite) TestMachineAgentSetsRestoreInProgress(c *gc.C) {
  1857  	// Start the machine agent.
  1858  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1859  	a := s.newAgent(c, m)
  1860  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1861  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1862  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1863  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1864  	err := a.PrepareRestore()
  1865  	c.Assert(err, jc.ErrorIsNil)
  1866  	c.Assert(a.IsRestorePreparing(), jc.IsTrue)
  1867  	err = a.BeginRestore()
  1868  	c.Assert(err, jc.ErrorIsNil)
  1869  	c.Assert(a.IsRestoreRunning(), jc.IsTrue)
  1870  	err = a.BeginRestore()
  1871  	c.Assert(err, gc.ErrorMatches, "already restoring")
  1872  }
  1873  
  1874  func (s *MachineSuite) TestMachineAgentRestoreRequiresPrepare(c *gc.C) {
  1875  	// Start the machine agent.
  1876  	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
  1877  	a := s.newAgent(c, m)
  1878  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1879  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  1880  	c.Check(a.IsRestorePreparing(), jc.IsFalse)
  1881  	c.Check(a.IsRestoreRunning(), jc.IsFalse)
  1882  	err := a.BeginRestore()
  1883  	c.Assert(err, gc.ErrorMatches, "not in restore mode, cannot begin restoration")
  1884  	c.Assert(a.IsRestoreRunning(), jc.IsFalse)
  1885  }
  1886  
  1887  func (s *MachineSuite) TestNewEnvironmentStartsNewWorkers(c *gc.C) {
  1888  	_, closer := s.setUpNewEnvironment(c)
  1889  	defer closer()
  1890  	expectedWorkers, closer := s.setUpAgent(c)
  1891  	defer closer()
  1892  
  1893  	r1 := s.singularRecord.nextRunner(c)
  1894  	workers := r1.waitForWorker(c, "firewaller")
  1895  	c.Assert(workers, jc.SameContents, expectedWorkers)
  1896  }
  1897  
  1898  func (s *MachineSuite) TestNewStorageWorkerIsScopedToNewEnviron(c *gc.C) {
  1899  	st, closer := s.setUpNewEnvironment(c)
  1900  	defer closer()
  1901  
  1902  	// Check that newStorageWorker is called and the environ tag is scoped to
  1903  	// that of the new environment tag.
  1904  	started := make(chan struct{})
  1905  	newWorker := func(
  1906  		scope names.Tag,
  1907  		storageDir string,
  1908  		_ storageprovisioner.VolumeAccessor,
  1909  		_ storageprovisioner.FilesystemAccessor,
  1910  		_ storageprovisioner.LifecycleManager,
  1911  		_ storageprovisioner.EnvironAccessor,
  1912  		_ storageprovisioner.MachineAccessor,
  1913  		_ storageprovisioner.StatusSetter,
  1914  		_ clock.Clock,
  1915  	) worker.Worker {
  1916  		// storageDir is empty for environ storage provisioners
  1917  		if storageDir == "" {
  1918  			// If this is the worker for the new environment,
  1919  			// close the channel.
  1920  			if scope == st.EnvironTag() {
  1921  				close(started)
  1922  			}
  1923  		}
  1924  		return worker.NewNoOpWorker()
  1925  	}
  1926  	s.PatchValue(&newStorageWorker, newWorker)
  1927  
  1928  	_, closer = s.setUpAgent(c)
  1929  	defer closer()
  1930  
  1931  	// Wait for newStorageWorker to be started.
  1932  	select {
  1933  	case <-started:
  1934  	case <-time.After(coretesting.LongWait):
  1935  		c.Fatalf("timeout while waiting for storage worker to start")
  1936  	}
  1937  }
  1938  
  1939  func (s *MachineSuite) setUpNewEnvironment(c *gc.C) (newSt *state.State, closer func()) {
  1940  	// Create a new environment, tests can now watch if workers start for it.
  1941  	newSt = s.Factory.MakeEnvironment(c, &factory.EnvParams{
  1942  		ConfigAttrs: map[string]interface{}{
  1943  			"state-server": false,
  1944  		},
  1945  		Prepare: true,
  1946  	})
  1947  	return newSt, func() {
  1948  		newSt.Close()
  1949  	}
  1950  }
  1951  
  1952  func (s *MachineSuite) setUpAgent(c *gc.C) (expectedWorkers []string, closer func()) {
  1953  	expectedWorkers = make([]string, 0, len(perEnvSingularWorkers)+1)
  1954  	for _, w := range perEnvSingularWorkers {
  1955  		expectedWorkers = append(expectedWorkers, w)
  1956  		if w == "environ-provisioner" {
  1957  			expectedWorkers = append(expectedWorkers, "environ-storageprovisioner")
  1958  		}
  1959  	}
  1960  	s.PatchValue(&watcher.Period, 100*time.Millisecond)
  1961  
  1962  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1963  	a := s.newAgent(c, m)
  1964  	go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }()
  1965  
  1966  	_ = s.singularRecord.nextRunner(c) // Don't care about this one for this test.
  1967  
  1968  	// Wait for the workers for the initial env to start. The
  1969  	// firewaller is the last worker started for a new environment.
  1970  	r0 := s.singularRecord.nextRunner(c)
  1971  	workers := r0.waitForWorker(c, "firewaller")
  1972  	c.Assert(workers, jc.SameContents, expectedWorkers)
  1973  
  1974  	return expectedWorkers, func() {
  1975  		c.Check(a.Stop(), jc.ErrorIsNil)
  1976  	}
  1977  }
  1978  
  1979  func (s *MachineSuite) TestReplicasetInitiation(c *gc.C) {
  1980  	if runtime.GOOS == "windows" {
  1981  		c.Skip("state servers on windows aren't supported")
  1982  	}
  1983  
  1984  	s.fakeEnsureMongo.ReplicasetInitiated = false
  1985  
  1986  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  1987  	a := s.newAgent(c, m)
  1988  	agentConfig := a.CurrentConfig()
  1989  
  1990  	err := a.ensureMongoServer(agentConfig)
  1991  	c.Assert(err, jc.ErrorIsNil)
  1992  
  1993  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  1994  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 1)
  1995  }
  1996  
  1997  func (s *MachineSuite) TestReplicasetAlreadyInitiated(c *gc.C) {
  1998  	if runtime.GOOS == "windows" {
  1999  		c.Skip("state servers on windows aren't supported")
  2000  	}
  2001  
  2002  	s.fakeEnsureMongo.ReplicasetInitiated = true
  2003  
  2004  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  2005  	a := s.newAgent(c, m)
  2006  	agentConfig := a.CurrentConfig()
  2007  
  2008  	err := a.ensureMongoServer(agentConfig)
  2009  	c.Assert(err, jc.ErrorIsNil)
  2010  
  2011  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  2012  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 0)
  2013  }
  2014  
  2015  func (s *MachineSuite) TestReplicasetInitForNewStateServer(c *gc.C) {
  2016  	if runtime.GOOS == "windows" {
  2017  		c.Skip("state servers on windows aren't supported")
  2018  	}
  2019  
  2020  	s.fakeEnsureMongo.ServiceInstalled = false
  2021  	s.fakeEnsureMongo.ReplicasetInitiated = true
  2022  
  2023  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  2024  	a := s.newAgent(c, m)
  2025  	agentConfig := a.CurrentConfig()
  2026  
  2027  	err := a.ensureMongoServer(agentConfig)
  2028  	c.Assert(err, jc.ErrorIsNil)
  2029  
  2030  	c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1)
  2031  	c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 0)
  2032  }
  2033  
  2034  // MachineWithCharmsSuite provides infrastructure for tests which need to
  2035  // work with charms.
  2036  type MachineWithCharmsSuite struct {
  2037  	commonMachineSuite
  2038  	charmtesting.CharmSuite
  2039  
  2040  	machine *state.Machine
  2041  }
  2042  
  2043  func (s *MachineWithCharmsSuite) SetUpSuite(c *gc.C) {
  2044  	s.commonMachineSuite.SetUpSuite(c)
  2045  	s.CharmSuite.SetUpSuite(c, &s.commonMachineSuite.JujuConnSuite)
  2046  }
  2047  
  2048  func (s *MachineWithCharmsSuite) TearDownSuite(c *gc.C) {
  2049  	s.commonMachineSuite.TearDownSuite(c)
  2050  	s.CharmSuite.TearDownSuite(c)
  2051  }
  2052  
  2053  func (s *MachineWithCharmsSuite) SetUpTest(c *gc.C) {
  2054  	s.commonMachineSuite.SetUpTest(c)
  2055  	s.CharmSuite.SetUpTest(c)
  2056  }
  2057  
  2058  func (s *MachineWithCharmsSuite) TearDownTest(c *gc.C) {
  2059  	s.commonMachineSuite.TearDownTest(c)
  2060  	s.CharmSuite.TearDownTest(c)
  2061  }
  2062  
  2063  func (s *MachineWithCharmsSuite) TestManageEnvironRunsCharmRevisionUpdater(c *gc.C) {
  2064  	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
  2065  
  2066  	s.SetupScenario(c)
  2067  
  2068  	a := s.newAgent(c, m)
  2069  	go func() {
  2070  		c.Check(a.Run(nil), jc.ErrorIsNil)
  2071  	}()
  2072  	defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }()
  2073  
  2074  	checkRevision := func() bool {
  2075  		curl := charm.MustParseURL("cs:quantal/mysql")
  2076  		placeholder, err := s.State.LatestPlaceholderCharm(curl)
  2077  		return err == nil && placeholder.String() == curl.WithRevision(23).String()
  2078  	}
  2079  	success := false
  2080  	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
  2081  		if success = checkRevision(); success {
  2082  			break
  2083  		}
  2084  	}
  2085  	c.Assert(success, jc.IsTrue)
  2086  }
  2087  
  2088  type mongoSuite struct {
  2089  	coretesting.BaseSuite
  2090  }
  2091  
  2092  func (s *mongoSuite) TestStateWorkerDialSetsWriteMajority(c *gc.C) {
  2093  	s.testStateWorkerDialSetsWriteMajority(c, true)
  2094  }
  2095  
  2096  func (s *mongoSuite) TestStateWorkerDialDoesNotSetWriteMajorityWithoutReplsetConfig(c *gc.C) {
  2097  	s.testStateWorkerDialSetsWriteMajority(c, false)
  2098  }
  2099  
  2100  func (s *mongoSuite) testStateWorkerDialSetsWriteMajority(c *gc.C, configureReplset bool) {
  2101  	inst := gitjujutesting.MgoInstance{
  2102  		EnableJournal: true,
  2103  		Params:        []string{"--replSet", "juju"},
  2104  	}
  2105  	err := inst.Start(coretesting.Certs)
  2106  	c.Assert(err, jc.ErrorIsNil)
  2107  	defer inst.Destroy()
  2108  
  2109  	var expectedWMode string
  2110  	dialOpts := stateWorkerDialOpts
  2111  	if configureReplset {
  2112  		info := inst.DialInfo()
  2113  		args := peergrouper.InitiateMongoParams{
  2114  			DialInfo:       info,
  2115  			MemberHostPort: inst.Addr(),
  2116  		}
  2117  		err = peergrouper.MaybeInitiateMongoServer(args)
  2118  		c.Assert(err, jc.ErrorIsNil)
  2119  		expectedWMode = "majority"
  2120  	} else {
  2121  		dialOpts.Direct = true
  2122  	}
  2123  
  2124  	mongoInfo := mongo.Info{
  2125  		Addrs:  []string{inst.Addr()},
  2126  		CACert: coretesting.CACert,
  2127  	}
  2128  	session, err := mongo.DialWithInfo(mongoInfo, dialOpts)
  2129  	c.Assert(err, jc.ErrorIsNil)
  2130  	defer session.Close()
  2131  
  2132  	safe := session.Safe()
  2133  	c.Assert(safe, gc.NotNil)
  2134  	c.Assert(safe.WMode, gc.Equals, expectedWMode)
  2135  	c.Assert(safe.J, jc.IsTrue) // always enabled
  2136  }
  2137  
  2138  type shouldWriteProxyFilesSuite struct {
  2139  	coretesting.BaseSuite
  2140  }
  2141  
  2142  var _ = gc.Suite(&shouldWriteProxyFilesSuite{})
  2143  
  2144  func (s *shouldWriteProxyFilesSuite) TestAll(c *gc.C) {
  2145  	tests := []struct {
  2146  		description  string
  2147  		providerType string
  2148  		machineId    string
  2149  		expect       bool
  2150  	}{{
  2151  		description:  "local provider machine 0 must not write",
  2152  		providerType: "local",
  2153  		machineId:    "0",
  2154  		expect:       false,
  2155  	}, {
  2156  		description:  "local provider other machine must write 1",
  2157  		providerType: "local",
  2158  		machineId:    "0/kvm/0",
  2159  		expect:       true,
  2160  	}, {
  2161  		description:  "local provider other machine must write 2",
  2162  		providerType: "local",
  2163  		machineId:    "123",
  2164  		expect:       true,
  2165  	}, {
  2166  		description:  "other provider machine 0 must write",
  2167  		providerType: "anything",
  2168  		machineId:    "0",
  2169  		expect:       true,
  2170  	}, {
  2171  		description:  "other provider other machine must write 1",
  2172  		providerType: "dummy",
  2173  		machineId:    "0/kvm/0",
  2174  		expect:       true,
  2175  	}, {
  2176  		description:  "other provider other machine must write 2",
  2177  		providerType: "blahblahblah",
  2178  		machineId:    "123",
  2179  		expect:       true,
  2180  	}}
  2181  	for i, test := range tests {
  2182  		c.Logf("test %d: %s", i, test.description)
  2183  		mockConf := &mockAgentConfig{
  2184  			providerType: test.providerType,
  2185  			tag:          names.NewMachineTag(test.machineId),
  2186  		}
  2187  		c.Check(shouldWriteProxyFiles(mockConf), gc.Equals, test.expect)
  2188  	}
  2189  }
  2190  
  2191  type mockAgentConfig struct {
  2192  	agent.Config
  2193  	providerType string
  2194  	tag          names.Tag
  2195  }
  2196  
  2197  func (m *mockAgentConfig) Tag() names.Tag {
  2198  	return m.tag
  2199  }
  2200  
  2201  func (m *mockAgentConfig) Value(key string) string {
  2202  	if key == agent.ProviderType {
  2203  		return m.providerType
  2204  	}
  2205  	return ""
  2206  }
  2207  
  2208  type singularRunnerRecord struct {
  2209  	runnerC chan *fakeSingularRunner
  2210  }
  2211  
  2212  func newSingularRunnerRecord() *singularRunnerRecord {
  2213  	return &singularRunnerRecord{
  2214  		runnerC: make(chan *fakeSingularRunner, 5),
  2215  	}
  2216  }
  2217  
  2218  func (r *singularRunnerRecord) newSingularRunner(runner worker.Runner, conn singular.Conn) (worker.Runner, error) {
  2219  	sr, err := singular.New(runner, conn)
  2220  	if err != nil {
  2221  		return nil, err
  2222  	}
  2223  	fakeRunner := &fakeSingularRunner{
  2224  		Runner: sr,
  2225  		startC: make(chan string, 64),
  2226  	}
  2227  	r.runnerC <- fakeRunner
  2228  	return fakeRunner, nil
  2229  }
  2230  
  2231  // nextRunner blocks until a new singular runner is created.
  2232  func (r *singularRunnerRecord) nextRunner(c *gc.C) *fakeSingularRunner {
  2233  	for {
  2234  		select {
  2235  		case r := <-r.runnerC:
  2236  			return r
  2237  		case <-time.After(coretesting.LongWait):
  2238  			c.Fatal("timed out waiting for singular runner to be created")
  2239  		}
  2240  	}
  2241  }
  2242  
  2243  type fakeSingularRunner struct {
  2244  	worker.Runner
  2245  	startC chan string
  2246  }
  2247  
  2248  func (r *fakeSingularRunner) StartWorker(name string, start func() (worker.Worker, error)) error {
  2249  	r.startC <- name
  2250  	return r.Runner.StartWorker(name, start)
  2251  }
  2252  
  2253  // waitForWorker waits for a given worker to be started, returning all
  2254  // workers started while waiting.
  2255  func (r *fakeSingularRunner) waitForWorker(c *gc.C, target string) []string {
  2256  	var seen []string
  2257  	timeout := time.After(coretesting.LongWait)
  2258  	for {
  2259  		select {
  2260  		case workerName := <-r.startC:
  2261  			seen = append(seen, workerName)
  2262  			if workerName == target {
  2263  				return seen
  2264  			}
  2265  		case <-timeout:
  2266  			c.Fatal("timed out waiting for " + target)
  2267  		}
  2268  	}
  2269  }
  2270  
  2271  // waitForWorkers waits for a given worker to be started, returning all
  2272  // workers started while waiting.
  2273  func (r *fakeSingularRunner) waitForWorkers(c *gc.C, targets []string) []string {
  2274  	var seen []string
  2275  	seenTargets := make(map[string]bool)
  2276  	numSeenTargets := 0
  2277  	timeout := time.After(coretesting.LongWait)
  2278  	for {
  2279  		select {
  2280  		case workerName := <-r.startC:
  2281  			if seenTargets[workerName] == true {
  2282  				c.Fatal("worker started twice: " + workerName)
  2283  			}
  2284  			seenTargets[workerName] = true
  2285  			numSeenTargets++
  2286  			seen = append(seen, workerName)
  2287  			if numSeenTargets == len(targets) {
  2288  				return seen
  2289  			}
  2290  		case <-timeout:
  2291  			c.Fatalf("timed out waiting for %v", targets)
  2292  		}
  2293  	}
  2294  }
  2295  
  2296  type mockMetricAPI struct {
  2297  	stop          chan struct{}
  2298  	cleanUpCalled chan struct{}
  2299  	sendCalled    chan struct{}
  2300  }
  2301  
  2302  func newMockMetricAPI() *mockMetricAPI {
  2303  	return &mockMetricAPI{
  2304  		stop:          make(chan struct{}),
  2305  		cleanUpCalled: make(chan struct{}),
  2306  		sendCalled:    make(chan struct{}),
  2307  	}
  2308  }
  2309  
  2310  func (m *mockMetricAPI) CleanupOldMetrics() error {
  2311  	go func() {
  2312  		select {
  2313  		case m.cleanUpCalled <- struct{}{}:
  2314  		case <-m.stop:
  2315  			break
  2316  		}
  2317  	}()
  2318  	return nil
  2319  }
  2320  
  2321  func (m *mockMetricAPI) SendMetrics() error {
  2322  	go func() {
  2323  		select {
  2324  		case m.sendCalled <- struct{}{}:
  2325  		case <-m.stop:
  2326  			break
  2327  		}
  2328  	}()
  2329  	return nil
  2330  }
  2331  
  2332  func (m *mockMetricAPI) SendCalled() <-chan struct{} {
  2333  	return m.sendCalled
  2334  }
  2335  
  2336  func (m *mockMetricAPI) CleanupCalled() <-chan struct{} {
  2337  	return m.cleanUpCalled
  2338  }
  2339  
  2340  func (m *mockMetricAPI) Stop() {
  2341  	close(m.stop)
  2342  }
  2343  
  2344  func mkdtemp(prefix string) string {
  2345  	d, err := ioutil.TempDir("", prefix)
  2346  	if err != nil {
  2347  		panic(err)
  2348  	}
  2349  	return d
  2350  }
  2351  
  2352  func mktemp(prefix string, content string) string {
  2353  	f, err := ioutil.TempFile("", prefix)
  2354  	if err != nil {
  2355  		panic(err)
  2356  	}
  2357  	_, err = f.WriteString(content)
  2358  	if err != nil {
  2359  		panic(err)
  2360  	}
  2361  	f.Close()
  2362  	return f.Name()
  2363  }
  2364  
  2365  type mockLoopDeviceManager struct {
  2366  	detachLoopDevicesArgRootfs string
  2367  	detachLoopDevicesArgPrefix string
  2368  }
  2369  
  2370  func (m *mockLoopDeviceManager) DetachLoopDevices(rootfs, prefix string) error {
  2371  	m.detachLoopDevicesArgRootfs = rootfs
  2372  	m.detachLoopDevicesArgPrefix = prefix
  2373  	return nil
  2374  }