github.com/wallyworld/juju@v0.0.0-20161013125918-6cf1bc9d917a/state/unit.go

     1  // Copyright 2012-2015 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package state
     5  
     6  import (
     7  	"fmt"
     8  	"sort"
     9  	"time"
    10  
    11  	"github.com/juju/errors"
    12  	"github.com/juju/loggo"
    13  	jujutxn "github.com/juju/txn"
    14  	"github.com/juju/utils"
    15  	"github.com/juju/utils/set"
    16  	"github.com/juju/version"
    17  	"gopkg.in/juju/charm.v6-unstable"
    18  	"gopkg.in/juju/names.v2"
    19  	"gopkg.in/mgo.v2"
    20  	"gopkg.in/mgo.v2/bson"
    21  	"gopkg.in/mgo.v2/txn"
    22  
    23  	"github.com/juju/juju/constraints"
    24  	"github.com/juju/juju/core/actions"
    25  	"github.com/juju/juju/instance"
    26  	"github.com/juju/juju/network"
    27  	"github.com/juju/juju/state/presence"
    28  	"github.com/juju/juju/status"
    29  	"github.com/juju/juju/tools"
    30  )
    31  
    32  var unitLogger = loggo.GetLogger("juju.state.unit")
    33  
    34  // AssignmentPolicy controls what machine a unit will be assigned to.
    35  type AssignmentPolicy string
    36  
    37  const (
    38  	// AssignLocal indicates that all service units should be assigned
    39  	// to machine 0.
    40  	AssignLocal AssignmentPolicy = "local"
    41  
    42  	// AssignClean indicates that every service unit should be assigned
    43  	// to a machine that has never previously hosted any units, and that
    44  	// new machines should be launched if required.
    45  	AssignClean AssignmentPolicy = "clean"
    46  
    47  	// AssignCleanEmpty indicates that every service unit should be assigned
    48  	// to a machine that has never previously hosted any units, and which is not
    49  	// currently hosting any containers, and that new machines should be launched if required.
    50  	AssignCleanEmpty AssignmentPolicy = "clean-empty"
    51  
    52  	// AssignNew indicates that every service unit should be assigned to a new
    53  	// dedicated machine.  A new machine will be launched for each new unit.
    54  	AssignNew AssignmentPolicy = "new"
    55  )
    56  
    57  // ResolvedMode describes the way state transition errors
    58  // are resolved.
    59  type ResolvedMode string
    60  
    61  // These are available ResolvedMode values.
    62  const (
    63  	ResolvedNone       ResolvedMode = ""
    64  	ResolvedRetryHooks ResolvedMode = "retry-hooks"
    65  	ResolvedNoHooks    ResolvedMode = "no-hooks"
    66  )
    67  
    68  // port identifies a network port number for a particular protocol.
    69  // TODO(mue) Not really used anymore, see below. Can be removed when
    70  // cleaning unitDoc.
    71  type port struct {
    72  	Protocol string `bson:"protocol"`
    73  	Number   int    `bson:"number"`
    74  }
    75  
    76  // unitDoc represents the internal state of a unit in MongoDB.
    77  // Note the correspondence with UnitInfo in apiserver/params.
    78  type unitDoc struct {
    79  	DocID                  string `bson:"_id"`
    80  	Name                   string `bson:"name"`
    81  	ModelUUID              string `bson:"model-uuid"`
    82  	Application            string
    83  	Series                 string
    84  	CharmURL               *charm.URL
    85  	Principal              string
    86  	Subordinates           []string
    87  	StorageAttachmentCount int `bson:"storageattachmentcount"`
    88  	MachineId              string
    89  	Resolved               ResolvedMode
    90  	Tools                  *tools.Tools `bson:",omitempty"`
    91  	Life                   Life
    92  	TxnRevno               int64 `bson:"txn-revno"`
    93  	PasswordHash           string
    94  }
    95  
    96  // Unit represents the state of a service unit.
    97  type Unit struct {
    98  	st  *State
    99  	doc unitDoc
   100  }
   101  
   102  func newUnit(st *State, udoc *unitDoc) *Unit {
   103  	unit := &Unit{
   104  		st:  st,
   105  		doc: *udoc,
   106  	}
   107  	return unit
   108  }
   109  
   110  // Application returns the application this unit belongs to.
   111  func (u *Unit) Application() (*Application, error) {
   112  	return u.st.Application(u.doc.Application)
   113  }
   114  
   115  // ConfigSettings returns the complete set of service charm config settings
   116  // available to the unit. Unset values will be replaced with the default
   117  // value for the associated option, and may thus be nil when no default is
   118  // specified.
   119  func (u *Unit) ConfigSettings() (charm.Settings, error) {
   120  	if u.doc.CharmURL == nil {
   121  		return nil, fmt.Errorf("unit charm not set")
   122  	}
   123  	settings, err := readSettings(u.st, settingsC, applicationSettingsKey(u.doc.Application, u.doc.CharmURL))
   124  	if err != nil {
   125  		return nil, err
   126  	}
   127  	chrm, err := u.st.Charm(u.doc.CharmURL)
   128  	if err != nil {
   129  		return nil, err
   130  	}
   131  	result := chrm.Config().DefaultSettings()
   132  	for name, value := range settings.Map() {
   133  		result[name] = value
   134  	}
   135  	return result, nil
   136  }
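
        // A minimal usage sketch of ConfigSettings, assuming u is a *Unit
        // obtained from State.Unit; the "interval" option name is hypothetical:
        //
        //	settings, err := u.ConfigSettings()
        //	if err != nil {
        //		return err
        //	}
        //	// Unset options carry their charm defaults; a value may be nil
        //	// when the option has no default.
        //	interval := settings["interval"]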
   137  
   138  // ApplicationName returns the application name.
   139  func (u *Unit) ApplicationName() string {
   140  	return u.doc.Application
   141  }
   142  
   143  // Series returns the deployed charm's series.
   144  func (u *Unit) Series() string {
   145  	return u.doc.Series
   146  }
   147  
   148  // String returns the unit as a string.
   149  func (u *Unit) String() string {
   150  	return u.doc.Name
   151  }
   152  
   153  // Name returns the unit name.
   154  func (u *Unit) Name() string {
   155  	return u.doc.Name
   156  }
   157  
   158  // unitGlobalKey returns the global database key for the named unit.
   159  func unitGlobalKey(name string) string {
   160  	return "u#" + name + "#charm"
   161  }
   162  
   163  // globalWorkloadVersionKey returns the global database key for the
   164  // workload version status key for this unit.
   165  func globalWorkloadVersionKey(name string) string {
   166  	return unitGlobalKey(name) + "#sat#workload-version"
   167  }
   168  
   169  // globalAgentKey returns the global database key for the unit.
   170  func (u *Unit) globalAgentKey() string {
   171  	return unitAgentGlobalKey(u.doc.Name)
   172  }
   173  
   174  // globalMeterStatusKey returns the global database key for the meter status of the unit.
   175  func (u *Unit) globalMeterStatusKey() string {
   176  	return unitAgentGlobalKey(u.doc.Name)
   177  }
   178  
   179  // globalKey returns the global database key for the unit.
   180  func (u *Unit) globalKey() string {
   181  	return unitGlobalKey(u.doc.Name)
   182  }
   183  
   184  // globalWorkloadVersionKey returns the global database key for the unit's
   185  // workload version info.
   186  func (u *Unit) globalWorkloadVersionKey() string {
   187  	return globalWorkloadVersionKey(u.doc.Name)
   188  }
   189  
   190  // Life returns whether the unit is Alive, Dying or Dead.
   191  func (u *Unit) Life() Life {
   192  	return u.doc.Life
   193  }
   194  
   195  // WorkloadVersion returns the version of the running workload set by
   196  // the charm (eg, the version of postgresql that is running, as
   197  // opposed to the version of the postgresql charm).
   198  func (u *Unit) WorkloadVersion() (string, error) {
   199  	status, err := getStatus(u.st, u.globalWorkloadVersionKey(), "workload")
   200  	if errors.IsNotFound(err) {
   201  		return "", nil
   202  	} else if err != nil {
   203  		return "", errors.Trace(err)
   204  	}
   205  	return status.Message, nil
   206  }
   207  
   208  // SetWorkloadVersion sets the version of the workload that the unit
   209  // is currently running.
   210  func (u *Unit) SetWorkloadVersion(version string) error {
   211  	// Store in status rather than an attribute of the unit doc - we
   212  	// want to avoid everything being an attr of the main docs to
   213  	// stop a swarm of watchers being notified for irrelevant changes.
   214  	now := u.st.clock.Now()
   215  	return setStatus(u.st, setStatusParams{
   216  		badge:     "workload",
   217  		globalKey: u.globalWorkloadVersionKey(),
   218  		status:    status.Active,
   219  		message:   version,
   220  		updated:   &now,
   221  	})
   222  }
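
        // A sketch of the intended round trip, assuming the charm reported
        // workload version "9.5.3":
        //
        //	if err := u.SetWorkloadVersion("9.5.3"); err != nil {
        //		return err
        //	}
        //	v, err := u.WorkloadVersion() // v == "9.5.3" on success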
   223  
   224  // WorkloadVersionHistory returns a HistoryGetter which enables the
   225  // caller to request past workload version changes.
   226  func (u *Unit) WorkloadVersionHistory() *HistoryGetter {
   227  	return &HistoryGetter{st: u.st, globalKey: u.globalWorkloadVersionKey()}
   228  }
   229  
   230  // AgentTools returns the tools that the agent is currently running.
   231  // It returns an error that satisfies errors.IsNotFound if the tools
   232  // have not yet been set.
   233  func (u *Unit) AgentTools() (*tools.Tools, error) {
   234  	if u.doc.Tools == nil {
   235  		return nil, errors.NotFoundf("agent tools for unit %q", u)
   236  	}
   237  	tools := *u.doc.Tools
   238  	return &tools, nil
   239  }
   240  
   241  // SetAgentVersion sets the version of juju that the agent is
   242  // currently running.
   243  func (u *Unit) SetAgentVersion(v version.Binary) (err error) {
   244  	defer errors.DeferredAnnotatef(&err, "cannot set agent version for unit %q", u)
   245  	if err = checkVersionValidity(v); err != nil {
   246  		return err
   247  	}
   248  	tools := &tools.Tools{Version: v}
   249  	ops := []txn.Op{{
   250  		C:      unitsC,
   251  		Id:     u.doc.DocID,
   252  		Assert: notDeadDoc,
   253  		Update: bson.D{{"$set", bson.D{{"tools", tools}}}},
   254  	}}
   255  	if err := u.st.runTransaction(ops); err != nil {
   256  		return onAbort(err, ErrDead)
   257  	}
   258  	u.doc.Tools = tools
   259  	return nil
   260  }
   261  
   262  // SetPassword sets the password for the unit's agent.
   263  func (u *Unit) SetPassword(password string) error {
   264  	if len(password) < utils.MinAgentPasswordLength {
   265  		return fmt.Errorf("password is only %d bytes long, and is not a valid Agent password", len(password))
   266  	}
   267  	return u.setPasswordHash(utils.AgentPasswordHash(password))
   268  }
   269  
   270  // setPasswordHash sets the underlying password hash in the database directly
   271  // to the value supplied. This is split out from SetPassword to allow direct
   272  // manipulation in tests (to check for backwards compatibility).
   273  func (u *Unit) setPasswordHash(passwordHash string) error {
   274  	ops := []txn.Op{{
   275  		C:      unitsC,
   276  		Id:     u.doc.DocID,
   277  		Assert: notDeadDoc,
   278  		Update: bson.D{{"$set", bson.D{{"passwordhash", passwordHash}}}},
   279  	}}
   280  	err := u.st.runTransaction(ops)
   281  	if err != nil {
   282  		return fmt.Errorf("cannot set password of unit %q: %v", u, onAbort(err, ErrDead))
   283  	}
   284  	u.doc.PasswordHash = passwordHash
   285  	return nil
   286  }
   287  
   288  // getPasswordHash returns the underlying PasswordHash stored in the
   289  // database. Used by the test suite to check that the PasswordHash gets
   290  // properly updated to new values when compatibility mode is detected.
   291  func (u *Unit) getPasswordHash() string {
   292  	return u.doc.PasswordHash
   293  }
   294  
   295  // PasswordValid returns whether the given password is valid
   296  // for the given unit.
   297  func (u *Unit) PasswordValid(password string) bool {
   298  	agentHash := utils.AgentPasswordHash(password)
   299  	return agentHash == u.doc.PasswordHash
   300  }
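
        // A sketch pairing SetPassword with PasswordValid, assuming password
        // meets utils.MinAgentPasswordLength:
        //
        //	if err := u.SetPassword(password); err != nil {
        //		return err
        //	}
        //	ok := u.PasswordValid(password) // true once the hash is stored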
   304  
   305  // Destroy, when called on an Alive unit, advances its lifecycle as far as
   306  // possible; it otherwise has no effect. In most situations, the unit's
   307  // life is just set to Dying; but if a principal unit that is not assigned
   308  // to a provisioned machine is Destroyed, it will be removed from state
   309  // directly.
   310  func (u *Unit) Destroy() (err error) {
   311  	defer func() {
   312  		if err == nil {
   313  			// This is a white lie; the document might actually be removed.
   314  			u.doc.Life = Dying
   315  		}
   316  	}()
   317  	unit := &Unit{st: u.st, doc: u.doc}
   318  	buildTxn := func(attempt int) ([]txn.Op, error) {
   319  		if attempt > 0 {
   320  			if err := unit.Refresh(); errors.IsNotFound(err) {
   321  				return nil, jujutxn.ErrNoOperations
   322  			} else if err != nil {
   323  				return nil, err
   324  			}
   325  		}
   326  		switch ops, err := unit.destroyOps(); err {
   327  		case errRefresh:
   328  		case errAlreadyDying:
   329  			return nil, jujutxn.ErrNoOperations
   330  		case nil:
   331  			return ops, nil
   332  		default:
   333  			return nil, err
   334  		}
   335  		return nil, jujutxn.ErrNoOperations
   336  	}
   337  	if err = unit.st.run(buildTxn); err == nil {
   338  		if historyErr := unit.eraseHistory(); historyErr != nil {
   339  			logger.Errorf("cannot delete history for unit %q: %v", unit.globalKey(), historyErr)
   340  		}
   341  		if err = unit.Refresh(); errors.IsNotFound(err) {
   342  			return nil
   343  		}
   344  	}
   345  	return err
   346  }
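
        // A sketch of a typical Destroy call; afterwards the unit reports Dying,
        // though the underlying document may already have been removed:
        //
        //	if err := u.Destroy(); err != nil {
        //		return err
        //	}
        //	life := u.Life() // Dying (a "white lie" if the unit was removed)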
   347  
   348  func (u *Unit) eraseHistory() error {
   349  	history, closer := u.st.getCollection(statusesHistoryC)
   350  	defer closer()
   351  	historyW := history.Writeable()
   352  
   353  	if _, err := historyW.RemoveAll(bson.D{{"statusid", u.globalKey()}}); err != nil {
   354  		return err
   355  	}
   356  	if _, err := historyW.RemoveAll(bson.D{{"statusid", u.globalAgentKey()}}); err != nil {
   357  		return err
   358  	}
   359  	return nil
   360  }
   361  
   362  // destroyOps returns the operations required to destroy the unit. If it
   363  // returns errRefresh, the unit should be refreshed and the destruction
   364  // operations recalculated.
   365  func (u *Unit) destroyOps() ([]txn.Op, error) {
   366  	if u.doc.Life != Alive {
   367  		return nil, errAlreadyDying
   368  	}
   369  
   370  	// Where possible, we'd like to be able to short-circuit unit destruction
   371  	// such that units can be removed directly rather than waiting for their
   372  	// agents to start, observe Dying, set Dead, and shut down; this takes a
   373  	// long time and is vexing to users. This turns out to be possible if and
   374  	// only if the unit agent has not yet set its status; this implies that the
   375  	// most the unit could possibly have done is to run its install hook.
   376  	//
   377  	// There's no harm in removing a unit that's run its install hook only --
   378  	// or, at least, there is no more harm than there is in removing a unit
   379  	// that's run its stop hook, and that's the usual condition.
   380  	//
   381  	// Principals with subordinates are never eligible for this shortcut,
   382  	// because the unit agent must inevitably have set a status before getting
   383  	// to the point where it can actually create its subordinate.
   384  	//
   385  	// Subordinates should be eligible for the shortcut but are not currently
   386  	// considered, on the basis that (1) they were created by active principals
   387  	// and can be expected to be deployed pretty soon afterwards, so we don't
   388  	// lose much time and (2) by maintaining this restriction, I can reduce
   389  	// the number of tests that have to change and defer that improvement to
   390  	// its own CL.
   391  	minUnitsOp := minUnitsTriggerOp(u.st, u.ApplicationName())
   392  	cleanupOp := newCleanupOp(cleanupDyingUnit, u.doc.Name)
   393  	setDyingOp := txn.Op{
   394  		C:      unitsC,
   395  		Id:     u.doc.DocID,
   396  		Assert: isAliveDoc,
   397  		Update: bson.D{{"$set", bson.D{{"life", Dying}}}},
   398  	}
   399  	setDyingOps := []txn.Op{setDyingOp, cleanupOp, minUnitsOp}
   400  	if u.doc.Principal != "" {
   401  		return setDyingOps, nil
   402  	} else if len(u.doc.Subordinates)+u.doc.StorageAttachmentCount != 0 {
   403  		return setDyingOps, nil
   404  	}
   405  
   406  	// See if the unit agent has started running.
   407  	// If so, we can't set the unit directly to Dead.
   408  	agentStatusDocId := u.globalAgentKey()
   409  	agentStatusInfo, agentErr := getStatus(u.st, agentStatusDocId, "agent")
   410  	if errors.IsNotFound(agentErr) {
   411  		return nil, errAlreadyDying
   412  	} else if agentErr != nil {
   413  		return nil, errors.Trace(agentErr)
   414  	}
   415  	if agentStatusInfo.Status != status.Allocating {
   416  		return setDyingOps, nil
   417  	}
   418  
   419  	ops := []txn.Op{{
   420  		C:      statusesC,
   421  		Id:     u.st.docID(agentStatusDocId),
   422  		Assert: bson.D{{"status", status.Allocating}},
   423  	}, minUnitsOp}
   424  	removeAsserts := append(isAliveDoc, bson.DocElem{
   425  		"$and", []bson.D{
   426  			unitHasNoSubordinates,
   427  			unitHasNoStorageAttachments,
   428  		},
   429  	})
   430  	removeOps, err := u.removeOps(removeAsserts)
   431  	if err == errAlreadyRemoved {
   432  		return nil, errAlreadyDying
   433  	} else if err != nil {
   434  		return nil, err
   435  	}
   436  	return append(ops, removeOps...), nil
   437  }
   438  
   439  // destroyHostOps returns all necessary operations to destroy the service unit's host machine,
   440  // or ensure that the conditions preventing its destruction remain stable through the transaction.
   441  func (u *Unit) destroyHostOps(s *Application) (ops []txn.Op, err error) {
   442  	if s.doc.Subordinate {
   443  		return []txn.Op{{
   444  			C:      unitsC,
   445  			Id:     u.st.docID(u.doc.Principal),
   446  			Assert: txn.DocExists,
   447  			Update: bson.D{{"$pull", bson.D{{"subordinates", u.doc.Name}}}},
   448  		}}, nil
   449  	} else if u.doc.MachineId == "" {
   450  		unitLogger.Tracef("unit %v unassigned", u)
   451  		return nil, nil
   452  	}
   453  
   454  	machineUpdate := bson.D{{"$pull", bson.D{{"principals", u.doc.Name}}}}
   455  
   456  	m, err := u.st.Machine(u.doc.MachineId)
   457  	if err != nil {
   458  		if errors.IsNotFound(err) {
   459  			return nil, nil
   460  		}
   461  		return nil, err
   462  	}
   463  
   464  	containerCheck := true // whether container conditions allow destroying the host machine
   465  	containers, err := m.Containers()
   466  	if err != nil {
   467  		return nil, err
   468  	}
   469  	if len(containers) > 0 {
   470  		ops = append(ops, txn.Op{
   471  			C:      containerRefsC,
   472  			Id:     m.doc.DocID,
   473  			Assert: bson.D{{"children.0", bson.D{{"$exists", 1}}}},
   474  		})
   475  		containerCheck = false
   476  	} else {
   477  		ops = append(ops, txn.Op{
   478  			C:  containerRefsC,
   479  			Id: m.doc.DocID,
   480  			Assert: bson.D{{"$or", []bson.D{
   481  				{{"children", bson.D{{"$size", 0}}}},
   482  				{{"children", bson.D{{"$exists", false}}}},
   483  			}}},
   484  		})
   485  	}
   486  
   487  	machineCheck := true // whether host machine conditions allow destroy
   488  	if len(m.doc.Principals) != 1 || m.doc.Principals[0] != u.doc.Name {
   489  		machineCheck = false
   490  	} else if hasJob(m.doc.Jobs, JobManageModel) {
   491  		// Check that the machine does not have any responsibilities that
   492  		// prevent a lifecycle change.
   493  		machineCheck = false
   494  	} else if m.doc.HasVote {
   495  		machineCheck = false
   496  	}
   497  
   498  	// Assert that the machine conditions pertaining to host removal
   499  	// remain the same throughout the transaction.
   500  	var machineAssert bson.D
   501  	if machineCheck {
   502  		machineAssert = bson.D{{"$and", []bson.D{
   503  			{{"principals", []string{u.doc.Name}}},
   504  			{{"jobs", bson.D{{"$nin", []MachineJob{JobManageModel}}}}},
   505  			{{"hasvote", bson.D{{"$ne", true}}}},
   506  		}}}
   507  	} else {
   508  		machineAssert = bson.D{{"$or", []bson.D{
   509  			{{"principals", bson.D{{"$ne", []string{u.doc.Name}}}}},
   510  			{{"jobs", bson.D{{"$in", []MachineJob{JobManageModel}}}}},
   511  			{{"hasvote", true}},
   512  		}}}
   513  	}
   514  
   515  	// If the removal conditions are satisfied by the machine and container
   516  	// docs, we can destroy the machine as well as removing the unit principal.
   517  	if machineCheck && containerCheck {
   518  		machineUpdate = append(machineUpdate, bson.D{{"$set", bson.D{{"life", Dying}}}}...)
   519  	}
   520  
   521  	ops = append(ops, txn.Op{
   522  		C:      machinesC,
   523  		Id:     m.doc.DocID,
   524  		Assert: machineAssert,
   525  		Update: machineUpdate,
   526  	})
   527  	return ops, nil
   528  }
   529  
   530  // removeOps returns the operations necessary to remove the unit, assuming
   531  // the supplied asserts apply to the unit document.
   532  func (u *Unit) removeOps(asserts bson.D) ([]txn.Op, error) {
   533  	svc, err := u.st.Application(u.doc.Application)
   534  	if errors.IsNotFound(err) {
   535  		// If the service has been removed, the unit must already have been.
   536  		return nil, errAlreadyRemoved
   537  	} else if err != nil {
   538  		return nil, err
   539  	}
   540  	return svc.removeUnitOps(u, asserts)
   541  }
   542  
   543  // ErrUnitHasSubordinates is a standard error to indicate that a Unit
   544  // cannot complete an operation to end its life because it still has
   545  // subordinate units.
   546  var ErrUnitHasSubordinates = errors.New("unit has subordinates")
   547  
   548  var unitHasNoSubordinates = bson.D{{
   549  	"$or", []bson.D{
   550  		{{"subordinates", bson.D{{"$size", 0}}}},
   551  		{{"subordinates", bson.D{{"$exists", false}}}},
   552  	},
   553  }}
   554  
   555  // ErrUnitHasStorageAttachments is a standard error to indicate that
   556  // a Unit cannot complete an operation to end its life because it still
   557  // has storage attachments.
   558  var ErrUnitHasStorageAttachments = errors.New("unit has storage attachments")
   559  
   560  var unitHasNoStorageAttachments = bson.D{{
   561  	"$or", []bson.D{
   562  		{{"storageattachmentcount", 0}},
   563  		{{"storageattachmentcount", bson.D{{"$exists", false}}}},
   564  	},
   565  }}
   566  
   567  // EnsureDead sets the unit lifecycle to Dead if it is Alive or Dying.
   568  // It does nothing otherwise. If the unit has subordinates, it will
   569  // return ErrUnitHasSubordinates; otherwise, if it has storage attachments,
   570  // it will return ErrUnitHasStorageAttachments.
   571  func (u *Unit) EnsureDead() (err error) {
   572  	if u.doc.Life == Dead {
   573  		return nil
   574  	}
   575  	defer func() {
   576  		if err == nil {
   577  			u.doc.Life = Dead
   578  		}
   579  	}()
   580  	assert := append(notDeadDoc, bson.DocElem{
   581  		"$and", []bson.D{
   582  			unitHasNoSubordinates,
   583  			unitHasNoStorageAttachments,
   584  		},
   585  	})
   586  	ops := []txn.Op{{
   587  		C:      unitsC,
   588  		Id:     u.doc.DocID,
   589  		Assert: assert,
   590  		Update: bson.D{{"$set", bson.D{{"life", Dead}}}},
   591  	}}
   592  	if err := u.st.runTransaction(ops); err != txn.ErrAborted {
   593  		return err
   594  	}
   595  	if notDead, err := isNotDead(u.st, unitsC, u.doc.DocID); err != nil {
   596  		return err
   597  	} else if !notDead {
   598  		return nil
   599  	}
   600  	if err := u.Refresh(); errors.IsNotFound(err) {
   601  		return nil
   602  	} else if err != nil {
   603  		return err
   604  	}
   605  	if len(u.doc.Subordinates) > 0 {
   606  		return ErrUnitHasSubordinates
   607  	}
   608  	return ErrUnitHasStorageAttachments
   609  }
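
        // A sketch of handling the sentinel errors EnsureDead can return:
        //
        //	switch err := u.EnsureDead(); err {
        //	case nil:
        //	case ErrUnitHasSubordinates:
        //		// Wait for the subordinates to be removed, then retry.
        //	case ErrUnitHasStorageAttachments:
        //		// Detach or destroy the storage first, then retry.
        //	default:
        //		return err
        //	}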
   610  
   611  // Remove removes the unit from state, and may remove its service as well, if
   612  // the service is Dying and no other references to it exist. It will fail if
   613  // the unit is not Dead.
   614  func (u *Unit) Remove() (err error) {
   615  	defer errors.DeferredAnnotatef(&err, "cannot remove unit %q", u)
   616  	if u.doc.Life != Dead {
   617  		return errors.New("unit is not dead")
   618  	}
   619  
   620  	// Now the unit is Dead, we can be sure that it's impossible for it to
   621  	// enter relation scopes (once it's Dying, we can be sure of this; but
   622  	// EnsureDead does not require that it already be Dying, so this is the
   623  	// only point at which we can safely backstop lp:1233457 and mitigate
   624  	// the impact of unit agent bugs that leave relation scopes occupied).
   625  	relations, err := applicationRelations(u.st, u.doc.Application)
   626  	if err != nil {
   627  		return err
   628  	}
   629  	for _, rel := range relations {
   630  		ru, err := rel.Unit(u)
   631  		if err != nil {
   632  			return err
   633  		}
   634  		if err := ru.LeaveScope(); err != nil {
   635  			return err
   636  		}
   637  	}
   638  
   639  	// Now we're sure we haven't left any scopes occupied by this unit, we
   640  	// can safely remove the document.
   641  	unit := &Unit{st: u.st, doc: u.doc}
   642  	buildTxn := func(attempt int) ([]txn.Op, error) {
   643  		if attempt > 0 {
   644  			if err := unit.Refresh(); errors.IsNotFound(err) {
   645  				return nil, jujutxn.ErrNoOperations
   646  			} else if err != nil {
   647  				return nil, err
   648  			}
   649  		}
   650  		switch ops, err := unit.removeOps(isDeadDoc); err {
   651  		case errRefresh:
   652  		case errAlreadyDying:
   653  			return nil, jujutxn.ErrNoOperations
   654  		case nil:
   655  			return ops, nil
   656  		default:
   657  			return nil, err
   658  		}
   659  		return nil, jujutxn.ErrNoOperations
   660  	}
   661  	return unit.st.run(buildTxn)
   662  }
   663  
   664  // Resolved returns the resolved mode for the unit.
   665  func (u *Unit) Resolved() ResolvedMode {
   666  	return u.doc.Resolved
   667  }
   668  
   669  // IsPrincipal returns whether the unit is deployed in its own container,
   670  // and can therefore have subordinate services deployed alongside it.
   671  func (u *Unit) IsPrincipal() bool {
   672  	return u.doc.Principal == ""
   673  }
   674  
   675  // SubordinateNames returns the names of any subordinate units.
   676  func (u *Unit) SubordinateNames() []string {
   677  	names := make([]string, len(u.doc.Subordinates))
   678  	copy(names, u.doc.Subordinates)
   679  	return names
   680  }
   681  
   682  // RelationsJoined returns the relations for which the unit has entered scope
   683  // and neither left it nor prepared to leave it.
   684  func (u *Unit) RelationsJoined() ([]*Relation, error) {
   685  	return u.relations(func(ru *RelationUnit) (bool, error) {
   686  		return ru.Joined()
   687  	})
   688  }
   689  
   690  // RelationsInScope returns the relations for which the unit has entered scope
   691  // and not left it.
   692  func (u *Unit) RelationsInScope() ([]*Relation, error) {
   693  	return u.relations(func(ru *RelationUnit) (bool, error) {
   694  		return ru.InScope()
   695  	})
   696  }
   697  
   698  type relationPredicate func(ru *RelationUnit) (bool, error)
   699  
   700  // relations implements RelationsJoined and RelationsInScope.
   701  func (u *Unit) relations(predicate relationPredicate) ([]*Relation, error) {
   702  	candidates, err := applicationRelations(u.st, u.doc.Application)
   703  	if err != nil {
   704  		return nil, err
   705  	}
   706  	var filtered []*Relation
   707  	for _, relation := range candidates {
   708  		relationUnit, err := relation.Unit(u)
   709  		if err != nil {
   710  			return nil, err
   711  		}
   712  		if include, err := predicate(relationUnit); err != nil {
   713  			return nil, err
   714  		} else if include {
   715  			filtered = append(filtered, relation)
   716  		}
   717  	}
   718  	return filtered, nil
   719  }
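
        // A sketch of how a predicate plugs in from within this package; this
        // mirrors RelationsJoined above:
        //
        //	joined, err := u.relations(func(ru *RelationUnit) (bool, error) {
        //		return ru.Joined()
        //	})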
   720  
   721  // DeployerTag returns the tag of the agent responsible for deploying
   722  // the unit. If no such entity can be determined, false is returned.
   723  func (u *Unit) DeployerTag() (names.Tag, bool) {
   724  	if u.doc.Principal != "" {
   725  		return names.NewUnitTag(u.doc.Principal), true
   726  	} else if u.doc.MachineId != "" {
   727  		return names.NewMachineTag(u.doc.MachineId), true
   728  	}
   729  	return nil, false
   730  }
   731  
   732  // PrincipalName returns the name of the unit's principal.
   733  // If the unit is not a subordinate, false is returned.
   734  func (u *Unit) PrincipalName() (string, bool) {
   735  	return u.doc.Principal, u.doc.Principal != ""
   736  }
   737  
   738  // machine returns the unit's machine.
   739  //
   740  // machine is part of the machineAssignable interface.
   741  func (u *Unit) machine() (*Machine, error) {
   742  	id, err := u.AssignedMachineId()
   743  	if err != nil {
   744  		return nil, errors.Annotatef(err, "unit %v cannot get assigned machine", u)
   745  	}
   746  	m, err := u.st.Machine(id)
   747  	if err != nil {
   748  		return nil, errors.Annotatef(err, "cannot get machine %v for unit %v", id, u)
   749  	}
   750  	return m, nil
   751  }
   752  
   753  // noAssignedMachineOp is part of the machineAssignable interface.
   754  func (u *Unit) noAssignedMachineOp() txn.Op {
   755  	id := u.doc.DocID
   756  	if u.doc.Principal != "" {
   757  		id = u.doc.Principal
   758  	}
   759  	return txn.Op{
   760  		C:      unitsC,
   761  		Id:     id,
   762  		Assert: bson.D{{"machineid", ""}},
   763  	}
   764  }
   765  
   766  // PublicAddress returns the public address of the unit.
   767  func (u *Unit) PublicAddress() (network.Address, error) {
   768  	m, err := u.machine()
   769  	if err != nil {
   770  		unitLogger.Tracef("%v", err)
   771  		return network.Address{}, errors.Trace(err)
   772  	}
   773  	return m.PublicAddress()
   774  }
   775  
   776  // PrivateAddress returns the private address of the unit.
   777  func (u *Unit) PrivateAddress() (network.Address, error) {
   778  	m, err := u.machine()
   779  	if err != nil {
   780  		unitLogger.Tracef("%v", err)
   781  		return network.Address{}, errors.Trace(err)
   782  	}
   783  	return m.PrivateAddress()
   784  }
   785  
   786  // AvailabilityZone returns the name of the availability zone into which
   787  // the unit's machine instance was provisioned.
   788  func (u *Unit) AvailabilityZone() (string, error) {
   789  	m, err := u.machine()
   790  	if err != nil {
   791  		return "", errors.Trace(err)
   792  	}
   793  	return m.AvailabilityZone()
   794  }
   795  
   796  // Refresh refreshes the contents of the Unit from the underlying
   797  // state. It returns an error that satisfies errors.IsNotFound if the
   798  // unit has been removed.
   799  func (u *Unit) Refresh() error {
   800  	units, closer := u.st.getCollection(unitsC)
   801  	defer closer()
   802  
   803  	err := units.FindId(u.doc.DocID).One(&u.doc)
   804  	if err == mgo.ErrNotFound {
   805  		return errors.NotFoundf("unit %q", u)
   806  	}
   807  	if err != nil {
   808  		return fmt.Errorf("cannot refresh unit %q: %v", u, err)
   809  	}
   810  	return nil
   811  }
   812  
   813  // Agent returns an agent by its unit's name.
   814  func (u *Unit) Agent() *UnitAgent {
   815  	return newUnitAgent(u.st, u.Tag(), u.Name())
   816  }
   817  
   818  // AgentHistory returns a StatusHistoryGetter which can
   819  // be used to query the status history of the unit's agent.
   820  func (u *Unit) AgentHistory() status.StatusHistoryGetter {
   821  	return u.Agent()
   822  }
   823  
   824  // SetAgentStatus calls SetStatus for this unit's agent. This call
   825  // is equivalent to the former call to SetStatus when Agent and Unit
   826  // were not separate entities.
   827  func (u *Unit) SetAgentStatus(agentStatus status.StatusInfo) error {
   828  	agent := newUnitAgent(u.st, u.Tag(), u.Name())
   829  	s := status.StatusInfo{
   830  		Status:  agentStatus.Status,
   831  		Message: agentStatus.Message,
   832  		Data:    agentStatus.Data,
   833  		Since:   agentStatus.Since,
   834  	}
   835  	return agent.SetStatus(s)
   836  }
   837  
   838  // AgentStatus calls Status for this unit's agent. This call
   839  // is equivalent to the former call to Status when Agent and Unit
   840  // were not separate entities.
   841  func (u *Unit) AgentStatus() (status.StatusInfo, error) {
   842  	agent := newUnitAgent(u.st, u.Tag(), u.Name())
   843  	return agent.Status()
   844  }
   845  
   846  // StatusHistory returns a slice of at most <size> StatusInfo items,
   847  // or items as old as <date>, or items newer than now - <delta>,
   848  // representing past statuses for this unit.
   849  func (u *Unit) StatusHistory(filter status.StatusHistoryFilter) ([]status.StatusInfo, error) {
   850  	args := &statusHistoryArgs{
   851  		st:        u.st,
   852  		globalKey: u.globalKey(),
   853  		filter:    filter,
   854  	}
   855  	return statusHistory(args)
   856  }
   857  
   858  // Status returns the status of the unit.
   859  // This method relies on globalKey instead of globalAgentKey since it is part of
   860  // the effort to separate Unit from UnitAgent. Now the Status for UnitAgent is in
   861  // the UnitAgent struct.
   862  func (u *Unit) Status() (status.StatusInfo, error) {
   863  	// The current health spec says when a hook error occurs, the workload should
   864  	// be in error state, but the state model more correctly records the agent
   865  	// itself as being in error. So we'll do that model translation here.
   866  	// TODO(fwereade) as on unitagent, this transformation does not belong here.
   867  	// For now, pretend we're always reading the unit status.
   868  	info, err := getStatus(u.st, u.globalAgentKey(), "unit")
   869  	if err != nil {
   870  		return status.StatusInfo{}, err
   871  	}
   872  	if info.Status != status.Error {
   873  		info, err = getStatus(u.st, u.globalKey(), "unit")
   874  		if err != nil {
   875  			return status.StatusInfo{}, err
   876  		}
   877  	}
   878  	return info, nil
   879  }
   880  
   881  // SetStatus sets the status of the unit agent. The optional values
   882  // allow passing additional helpful status data.
   883  // This method relies on globalKey instead of globalAgentKey since it is part of
   884  // the effort to separate Unit from UnitAgent. Now the SetStatus for UnitAgent is in
   885  // the UnitAgent struct.
   886  func (u *Unit) SetStatus(unitStatus status.StatusInfo) error {
   887  	if !status.ValidWorkloadStatus(unitStatus.Status) {
   888  		return errors.Errorf("cannot set invalid status %q", unitStatus.Status)
   889  	}
   890  	return setStatus(u.st, setStatusParams{
   891  		badge:     "unit",
   892  		globalKey: u.globalKey(),
   893  		status:    unitStatus.Status,
   894  		message:   unitStatus.Message,
   895  		rawData:   unitStatus.Data,
   896  		updated:   unitStatus.Since,
   897  	})
   898  }
   899  
   900  // OpenPortsOnSubnet opens the given port range and protocol for the unit on the
   901  // given subnet, which can be empty. When non-empty, subnetID must refer to an
   902  // existing, alive subnet, otherwise an error is returned. Returns an error if
   903  // opening the requested range conflicts with another already opened range on
   904  // the same subnet and the unit's assigned machine.
   905  func (u *Unit) OpenPortsOnSubnet(subnetID, protocol string, fromPort, toPort int) (err error) {
   906  	ports, err := NewPortRange(u.Name(), fromPort, toPort, protocol)
   907  	if err != nil {
   908  		return errors.Annotatef(err, "invalid port range %v-%v/%v", fromPort, toPort, protocol)
   909  	}
   910  	defer errors.DeferredAnnotatef(&err, "cannot open ports %v for unit %q on subnet %q", ports, u, subnetID)
   911  
   912  	machineID, err := u.AssignedMachineId()
   913  	if err != nil {
   914  		return errors.Annotatef(err, "unit %q has no assigned machine", u)
   915  	}
   916  
   917  	if err := u.checkSubnetAliveWhenSet(subnetID); err != nil {
   918  		return errors.Trace(err)
   919  	}
   920  
   921  	machinePorts, err := getOrCreatePorts(u.st, machineID, subnetID)
   922  	if err != nil {
   923  		return errors.Annotate(err, "cannot get or create ports")
   924  	}
   925  
   926  	return machinePorts.OpenPorts(ports)
   927  }
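
        // A sketch of opening a port range on a specific subnet; the subnet ID
        // "10.0.0.0/24" is hypothetical:
        //
        //	if err := u.OpenPortsOnSubnet("10.0.0.0/24", "tcp", 8000, 8100); err != nil {
        //		return err
        //	}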
   928  
   929  func (u *Unit) checkSubnetAliveWhenSet(subnetID string) error {
   930  	if subnetID == "" {
   931  		return nil
   932  	} else if !names.IsValidSubnet(subnetID) {
   933  		return errors.Errorf("invalid subnet ID %q", subnetID)
   934  	}
   935  
   936  	subnet, err := u.st.Subnet(subnetID)
   937  	if err != nil && !errors.IsNotFound(err) {
   938  		return errors.Annotatef(err, "getting subnet %q", subnetID)
   939  	} else if errors.IsNotFound(err) || subnet.Life() != Alive {
   940  		return errors.Errorf("subnet %q not found or not alive", subnetID)
   941  	}
   942  	return nil
   943  }
   944  
   945  // ClosePortsOnSubnet closes the given port range and protocol for the unit on
   946  // the given subnet, which can be empty. When non-empty, subnetID must refer to
   947  // an existing, alive subnet, otherwise an error is returned.
   948  func (u *Unit) ClosePortsOnSubnet(subnetID, protocol string, fromPort, toPort int) (err error) {
   949  	ports, err := NewPortRange(u.Name(), fromPort, toPort, protocol)
   950  	if err != nil {
   951  		return errors.Annotatef(err, "invalid port range %v-%v/%v", fromPort, toPort, protocol)
   952  	}
   953  	defer errors.DeferredAnnotatef(&err, "cannot close ports %v for unit %q on subnet %q", ports, u, subnetID)
   954  
   955  	machineID, err := u.AssignedMachineId()
   956  	if err != nil {
   957  		return errors.Annotatef(err, "unit %q has no assigned machine", u)
   958  	}
   959  
   960  	if err := u.checkSubnetAliveWhenSet(subnetID); err != nil {
   961  		return errors.Trace(err)
   962  	}
   963  
   964  	machinePorts, err := getOrCreatePorts(u.st, machineID, subnetID)
   965  	if err != nil {
   966  		return errors.Annotate(err, "cannot get or create ports")
   967  	}
   968  
   969  	return machinePorts.ClosePorts(ports)
   970  }
   971  
   972  // OpenPorts opens the given port range and protocol for the unit, if it does
   973  // not conflict with another already opened range on the unit's assigned
   974  // machine.
   975  //
   976  // TODO(dimitern): This should be removed once we use OpenPortsOnSubnet across
   977  // the board, passing subnet IDs explicitly.
   978  func (u *Unit) OpenPorts(protocol string, fromPort, toPort int) error {
   979  	return u.OpenPortsOnSubnet("", protocol, fromPort, toPort)
   980  }
   981  
   982  // ClosePorts closes the given port range and protocol for the unit.
   983  //
   984  // TODO(dimitern): This should be removed once we use ClosePortsOnSubnet across
   985  // the board, passing subnet IDs explicitly.
   986  func (u *Unit) ClosePorts(protocol string, fromPort, toPort int) (err error) {
   987  	return u.ClosePortsOnSubnet("", protocol, fromPort, toPort)
   988  }
   989  
   990  // OpenPortOnSubnet opens the given port and protocol for the unit on the given
   991  // subnet, which can be empty. When non-empty, subnetID must refer to an
   992  // existing, alive subnet, otherwise an error is returned.
   993  func (u *Unit) OpenPortOnSubnet(subnetID, protocol string, number int) error {
   994  	return u.OpenPortsOnSubnet(subnetID, protocol, number, number)
   995  }
   996  
   997  // ClosePortOnSubnet closes the given port and protocol for the unit on the given
   998  // subnet, which can be empty. When non-empty, subnetID must refer to an
   999  // existing, alive subnet, otherwise an error is returned.
  1000  func (u *Unit) ClosePortOnSubnet(subnetID, protocol string, number int) error {
  1001  	return u.ClosePortsOnSubnet(subnetID, protocol, number, number)
  1002  }
  1003  
  1004  // OpenPort opens the given port and protocol for the unit.
  1005  //
  1006  // TODO(dimitern): This should be removed once we use OpenPort(s)OnSubnet across
  1007  // the board, passing subnet IDs explicitly.
  1008  func (u *Unit) OpenPort(protocol string, number int) error {
  1009  	return u.OpenPortOnSubnet("", protocol, number)
  1010  }
  1011  
  1012  // ClosePort closes the given port and protocol for the unit.
  1013  //
  1014  // TODO(dimitern): This should be removed once we use ClosePortsOnSubnet across
  1015  // the board, passing subnet IDs explicitly.
  1016  func (u *Unit) ClosePort(protocol string, number int) error {
  1017  	return u.ClosePortOnSubnet("", protocol, number)
  1018  }
  1019  
  1020  // OpenedPortsOnSubnet returns a slice containing the open port ranges of the
  1021  // unit on the given subnet ID, which can be empty. When subnetID is not empty,
  1022  // it must refer to an existing, alive subnet, otherwise an error is returned.
  1023  // Also, when no ports are yet open for the unit on that subnet, an empty
  1024  // slice and no error are returned.
  1025  func (u *Unit) OpenedPortsOnSubnet(subnetID string) ([]network.PortRange, error) {
  1026  	machineID, err := u.AssignedMachineId()
  1027  	if err != nil {
  1028  		return nil, errors.Annotatef(err, "unit %q has no assigned machine", u)
  1029  	}
  1030  
  1031  	if err := u.checkSubnetAliveWhenSet(subnetID); err != nil {
  1032  		return nil, errors.Trace(err)
  1033  	}
  1034  
  1035  	machinePorts, err := getPorts(u.st, machineID, subnetID)
  1036  	result := []network.PortRange{}
  1037  	if errors.IsNotFound(err) {
  1038  		return result, nil
  1039  	} else if err != nil {
  1040  		return nil, errors.Annotatef(err, "failed getting ports for unit %q, subnet %q", u, subnetID)
  1041  	}
  1042  	ports := machinePorts.PortsForUnit(u.Name())
  1043  	for _, port := range ports {
  1044  		result = append(result, network.PortRange{
  1045  			Protocol: port.Protocol,
  1046  			FromPort: port.FromPort,
  1047  			ToPort:   port.ToPort,
  1048  		})
  1049  	}
  1050  	network.SortPortRanges(result)
  1051  	return result, nil
  1052  }
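
        // A sketch of listing a unit's open ranges; the empty subnet ID selects
        // the default ports document:
        //
        //	ranges, err := u.OpenedPortsOnSubnet("")
        //	if err != nil {
        //		return err
        //	}
        //	for _, r := range ranges {
        //		fmt.Printf("%d-%d/%s\n", r.FromPort, r.ToPort, r.Protocol)
        //	}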
  1053  
  1054  // OpenedPorts returns a slice containing the open port ranges of the
  1055  // unit.
  1056  //
  1057  // TODO(dimitern): This should be removed once we use OpenedPortsOnSubnet across
  1058  // the board, passing subnet IDs explicitly.
  1059  func (u *Unit) OpenedPorts() ([]network.PortRange, error) {
  1060  	return u.OpenedPortsOnSubnet("")
  1061  }
  1062  
  1063  // CharmURL returns the charm URL this unit is currently using.
  1064  func (u *Unit) CharmURL() (*charm.URL, bool) {
  1065  	if u.doc.CharmURL == nil {
  1066  		return nil, false
  1067  	}
  1068  	return u.doc.CharmURL, true
  1069  }
  1070  
  1071  // SetCharmURL marks the unit as currently using the supplied charm URL.
  1072  // An error will be returned if the unit is dead, or the charm URL is not known.
  1073  func (u *Unit) SetCharmURL(curl *charm.URL) error {
  1074  	if curl == nil {
  1075  		return fmt.Errorf("cannot set nil charm url")
  1076  	}
  1077  
  1078  	db, closer := u.st.newDB()
  1079  	defer closer()
  1080  	units, closer := db.GetCollection(unitsC)
  1081  	defer closer()
  1082  	charms, closer := db.GetCollection(charmsC)
  1083  	defer closer()
  1084  
  1085  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1086  		if attempt > 0 {
  1087  			// NOTE: We're explicitly allowing SetCharmURL to succeed
  1088  			// when the unit is Dying, because service/charm upgrades
  1089  			// should still be allowed to apply to dying units, so
  1090  			// that bugs in departed/broken hooks can be addressed at
  1091  			// runtime.
  1092  			if notDead, err := isNotDeadWithSession(units, u.doc.DocID); err != nil {
  1093  				return nil, errors.Trace(err)
  1094  			} else if !notDead {
  1095  				return nil, ErrDead
  1096  			}
  1097  		}
  1098  		sel := bson.D{{"_id", u.doc.DocID}, {"charmurl", curl}}
  1099  		if count, err := units.Find(sel).Count(); err != nil {
  1100  			return nil, errors.Trace(err)
  1101  		} else if count == 1 {
  1102  			// Already set
  1103  			return nil, jujutxn.ErrNoOperations
  1104  		}
  1105  		if count, err := charms.FindId(curl.String()).Count(); err != nil {
  1106  			return nil, errors.Trace(err)
  1107  		} else if count < 1 {
  1108  			return nil, errors.Errorf("unknown charm url %q", curl)
  1109  		}
  1110  
  1111  		// Add a reference to the service settings for the new charm.
  1112  		incOps, err := appCharmIncRefOps(u.st, u.doc.Application, curl, false)
  1113  		if err != nil {
  1114  			return nil, errors.Trace(err)
  1115  		}
  1116  
  1117  		// Set the new charm URL.
  1118  		differentCharm := bson.D{{"charmurl", bson.D{{"$ne", curl}}}}
  1119  		ops := append(incOps,
  1120  			txn.Op{
  1121  				C:      unitsC,
  1122  				Id:     u.doc.DocID,
  1123  				Assert: append(notDeadDoc, differentCharm...),
  1124  				Update: bson.D{{"$set", bson.D{{"charmurl", curl}}}},
  1125  			})
  1126  		if u.doc.CharmURL != nil {
  1127  			// Drop the reference to the old charm.
  1128  			decOps, err := appCharmDecRefOps(u.st, u.doc.Application, u.doc.CharmURL)
  1129  			if err != nil {
  1130  				return nil, errors.Trace(err)
  1131  			}
  1132  			ops = append(ops, decOps...)
  1133  		}
  1134  		return ops, nil
  1135  	}
  1136  	err := u.st.run(buildTxn)
  1137  	if err == nil {
  1138  		u.doc.CharmURL = curl
  1139  	}
  1140  	return err
  1141  }
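
        // A sketch of pointing the unit at a new charm, assuming curl refers to
        // a charm already known to state:
        //
        //	if err := u.SetCharmURL(curl); err != nil {
        //		return err
        //	}
        //	cu, ok := u.CharmURL() // ok is true; cu equals curl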
  1142  
  1143  // AgentPresence returns whether the respective remote agent is alive.
  1144  func (u *Unit) AgentPresence() (bool, error) {
  1145  	pwatcher := u.st.workers.PresenceWatcher()
  1146  	return pwatcher.Alive(u.globalAgentKey())
  1147  }
  1148  
  1149  // Tag returns a name identifying the unit.
  1150  // The returned name will be different from other Tag values returned by any
  1151  // other entities from the same state.
  1152  func (u *Unit) Tag() names.Tag {
  1153  	return u.UnitTag()
  1154  }
  1155  
  1156  // UnitTag returns a names.UnitTag representing this Unit, unless the
  1157  // unit Name is invalid, in which case it will panic.
  1158  func (u *Unit) UnitTag() names.UnitTag {
  1159  	return names.NewUnitTag(u.Name())
  1160  }
  1161  
  1162  // WaitAgentPresence blocks until the respective agent is alive.
  1163  func (u *Unit) WaitAgentPresence(timeout time.Duration) (err error) {
  1164  	defer errors.DeferredAnnotatef(&err, "waiting for agent of unit %q", u)
  1165  	ch := make(chan presence.Change)
  1166  	pwatcher := u.st.workers.PresenceWatcher()
  1167  	pwatcher.Watch(u.globalAgentKey(), ch)
  1168  	defer pwatcher.Unwatch(u.globalAgentKey(), ch)
  1169  	for i := 0; i < 2; i++ {
  1170  		select {
  1171  		case change := <-ch:
  1172  			if change.Alive {
  1173  				return nil
  1174  			}
  1175  		case <-time.After(timeout):
  1176  			// TODO(fwereade): 2016-03-17 lp:1558657
  1177  			return fmt.Errorf("still not alive after timeout")
  1178  		case <-pwatcher.Dead():
  1179  			return pwatcher.Err()
  1180  		}
  1181  	}
  1182  	panic(fmt.Sprintf("presence reported dead status twice in a row for unit %q", u))
  1183  }
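
        // A sketch of waiting for the unit agent to come up, with a hypothetical
        // one-minute timeout:
        //
        //	if err := u.WaitAgentPresence(time.Minute); err != nil {
        //		return err
        //	}
        //	alive, err := u.AgentPresence()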
  1184  
  1185  // SetAgentPresence signals that the agent for unit u is alive.
  1186  // It returns the started pinger.
  1187  func (u *Unit) SetAgentPresence() (*presence.Pinger, error) {
  1188  	presenceCollection := u.st.getPresenceCollection()
  1189  	p := presence.NewPinger(presenceCollection, u.st.ModelTag(), u.globalAgentKey())
  1190  	err := p.Start()
  1191  	if err != nil {
  1192  		return nil, err
  1193  	}
  1194  	return p, nil
  1195  }
  1196  
  1197  func unitNotAssignedError(u *Unit) error {
  1198  	msg := fmt.Sprintf("unit %q is not assigned to a machine", u)
  1199  	return errors.NewNotAssigned(nil, msg)
  1200  }
  1201  
  1202  // AssignedMachineId returns the id of the assigned machine.
  1203  func (u *Unit) AssignedMachineId() (id string, err error) {
  1204  	if u.IsPrincipal() {
  1205  		if u.doc.MachineId == "" {
  1206  			return "", unitNotAssignedError(u)
  1207  		}
  1208  		return u.doc.MachineId, nil
  1209  	}
  1210  
  1211  	units, closer := u.st.getCollection(unitsC)
  1212  	defer closer()
  1213  
  1214  	pudoc := unitDoc{}
  1215  	err = units.FindId(u.doc.Principal).One(&pudoc)
  1216  	if err == mgo.ErrNotFound {
  1217  		return "", errors.NotFoundf("principal unit %q of %q", u.doc.Principal, u)
  1218  	} else if err != nil {
  1219  		return "", err
  1220  	}
  1221  	if pudoc.MachineId == "" {
  1222  		return "", unitNotAssignedError(u)
  1223  	}
  1224  	return pudoc.MachineId, nil
  1225  }
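
        // A sketch of resolving a unit's machine; for a subordinate this follows
        // the principal's assignment:
        //
        //	id, err := u.AssignedMachineId()
        //	if errors.IsNotAssigned(err) {
        //		// The unit (or its principal) has no machine yet.
        //	}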
  1226  
  1227  var (
  1228  	machineNotAliveErr = errors.New("machine is not alive")
  1229  	machineNotCleanErr = errors.New("machine is dirty")
  1230  	unitNotAliveErr    = errors.New("unit is not alive")
  1231  	alreadyAssignedErr = errors.New("unit is already assigned to a machine")
  1232  	inUseErr           = errors.New("machine is not unused")
  1233  )
  1234  
  1235  // assignToMachine is the internal version of AssignToMachine.
  1236  func (u *Unit) assignToMachine(m *Machine, unused bool) (err error) {
  1237  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1238  		u, m := u, m // don't change outer vars
  1239  		if attempt > 0 {
  1240  			var err error
  1241  			u, err = u.st.Unit(u.Name())
  1242  			if err != nil {
  1243  				return nil, errors.Trace(err)
  1244  			}
  1245  			m, err = u.st.Machine(m.Id())
  1246  			if err != nil {
  1247  				return nil, errors.Trace(err)
  1248  			}
  1249  		}
  1250  		return u.assignToMachineOps(m, unused)
  1251  	}
  1252  	if err := u.st.run(buildTxn); err != nil {
  1253  		return errors.Trace(err)
  1254  	}
  1255  	u.doc.MachineId = m.doc.Id
  1256  	m.doc.Clean = false
  1257  	return nil
  1258  }
  1259  
  1260  // assignToMachineOps returns txn.Ops to assign a unit to a machine.
  1261  // assignToMachineOps returns specific errors in some cases:
  1262  // - machineNotAliveErr when the machine is not alive.
  1263  // - unitNotAliveErr when the unit is not alive.
  1264  // - alreadyAssignedErr when the unit has already been assigned.
  1265  // - inUseErr when the machine already has a unit assigned (if unused is true).
  1266  func (u *Unit) assignToMachineOps(m *Machine, unused bool) ([]txn.Op, error) {
  1267  	if u.Life() != Alive {
  1268  		return nil, unitNotAliveErr
  1269  	}
  1270  	if u.doc.MachineId != "" {
  1271  		if u.doc.MachineId != m.Id() {
  1272  			return nil, alreadyAssignedErr
  1273  		}
  1274  		return nil, jujutxn.ErrNoOperations
  1275  	}
  1276  	if unused && !m.doc.Clean {
  1277  		return nil, inUseErr
  1278  	}
  1279  	storageParams, err := u.machineStorageParams()
  1280  	if err != nil {
  1281  		return nil, errors.Trace(err)
  1282  	}
  1283  	storagePools, err := machineStoragePools(m.st, storageParams)
  1284  	if err != nil {
  1285  		return nil, errors.Trace(err)
  1286  	}
  1287  	if err := validateUnitMachineAssignment(
  1288  		m, u.doc.Series, u.doc.Principal != "", storagePools,
  1289  	); err != nil {
  1290  		return nil, errors.Trace(err)
  1291  	}
  1292  	storageOps, volumesAttached, filesystemsAttached, err := u.st.machineStorageOps(
  1293  		&m.doc, storageParams,
  1294  	)
  1295  	if err != nil {
  1296  		return nil, errors.Trace(err)
  1297  	}
  1298  	// addMachineStorageAttachmentsOps will add a txn.Op that ensures
  1299  	// that no filesystems were concurrently added to the machine if
  1300  	// any of the filesystems being attached specify a location.
  1301  	attachmentOps, err := addMachineStorageAttachmentsOps(
  1302  		m, volumesAttached, filesystemsAttached,
  1303  	)
  1304  	if err != nil {
  1305  		return nil, errors.Trace(err)
  1306  	}
  1307  	storageOps = append(storageOps, attachmentOps...)
  1308  
  1309  	assert := append(isAliveDoc, bson.D{{
  1310  		// The unit's subordinates must not change while we're
  1311  		// assigning it to a machine, to ensure machine storage
  1312  		// is created for subordinate units.
  1313  		"subordinates", u.doc.Subordinates,
  1314  	}, {
  1315  		"$or", []bson.D{
  1316  			{{"machineid", ""}},
  1317  			{{"machineid", m.Id()}},
  1318  		},
  1319  	}}...)
  1320  	massert := isAliveDoc
  1321  	if unused {
  1322  		massert = append(massert, bson.D{{"clean", bson.D{{"$ne", false}}}}...)
  1323  	}
  1324  	ops := []txn.Op{{
  1325  		C:      unitsC,
  1326  		Id:     u.doc.DocID,
  1327  		Assert: assert,
  1328  		Update: bson.D{{"$set", bson.D{{"machineid", m.doc.Id}}}},
  1329  	}, {
  1330  		C:      machinesC,
  1331  		Id:     m.doc.DocID,
  1332  		Assert: massert,
  1333  		Update: bson.D{{"$addToSet", bson.D{{"principals", u.doc.Name}}}, {"$set", bson.D{{"clean", false}}}},
  1334  	},
  1335  		removeStagedAssignmentOp(u.doc.DocID),
  1336  	}
  1337  	ops = append(ops, storageOps...)
  1338  	return ops, nil
  1339  }
  1340  
  1341  // validateUnitMachineAssignment validates the parameters for assigning a unit
  1342  // to a specified machine.
  1343  func validateUnitMachineAssignment(
  1344  	m *Machine,
  1345  	series string,
  1346  	isSubordinate bool,
  1347  	storagePools set.Strings,
  1348  ) (err error) {
  1349  	if m.Life() != Alive {
  1350  		return machineNotAliveErr
  1351  	}
  1352  	if isSubordinate {
  1353  		return fmt.Errorf("unit is a subordinate")
  1354  	}
  1355  	if series != m.doc.Series {
  1356  		return fmt.Errorf("series does not match")
  1357  	}
  1358  	canHost := false
  1359  	for _, j := range m.doc.Jobs {
  1360  		if j == JobHostUnits {
  1361  			canHost = true
  1362  			break
  1363  		}
  1364  	}
  1365  	if !canHost {
  1366  		return fmt.Errorf("machine %q cannot host units", m)
  1367  	}
  1368  	if err := validateDynamicMachineStoragePools(m, storagePools); err != nil {
  1369  		return errors.Trace(err)
  1370  	}
  1371  	return nil
  1372  }
  1373  
  1374  // validateDynamicMachineStorageParams validates that the provided machine
  1375  // storage parameters are compatible with the specified machine.
  1376  func validateDynamicMachineStorageParams(m *Machine, params *machineStorageParams) error {
  1377  	pools, err := machineStoragePools(m.st, params)
  1378  	if err != nil {
  1379  		return err
  1380  	}
  1381  	return validateDynamicMachineStoragePools(m, pools)
  1382  }
  1383  
  1384  // machineStoragePools returns the names of the storage pools in each of
  1385  // the volume, filesystem, and attachment machine storage parameters.
  1386  func machineStoragePools(st *State, params *machineStorageParams) (set.Strings, error) {
  1387  	pools := make(set.Strings)
  1388  	for _, v := range params.volumes {
  1389  		v, err := st.volumeParamsWithDefaults(v.Volume)
  1390  		if err != nil {
  1391  			return nil, errors.Trace(err)
  1392  		}
  1393  		pools.Add(v.Pool)
  1394  	}
  1395  	for _, f := range params.filesystems {
  1396  		f, err := st.filesystemParamsWithDefaults(f.Filesystem)
  1397  		if err != nil {
  1398  			return nil, errors.Trace(err)
  1399  		}
  1400  		pools.Add(f.Pool)
  1401  	}
  1402  	for volumeTag := range params.volumeAttachments {
  1403  		volume, err := st.Volume(volumeTag)
  1404  		if err != nil {
  1405  			return nil, errors.Trace(err)
  1406  		}
  1407  		if params, ok := volume.Params(); ok {
  1408  			pools.Add(params.Pool)
  1409  		} else {
  1410  			info, err := volume.Info()
  1411  			if err != nil {
  1412  				return nil, errors.Trace(err)
  1413  			}
  1414  			pools.Add(info.Pool)
  1415  		}
  1416  	}
  1417  	for filesystemTag := range params.filesystemAttachments {
  1418  		filesystem, err := st.Filesystem(filesystemTag)
  1419  		if err != nil {
  1420  			return nil, errors.Trace(err)
  1421  		}
  1422  		if params, ok := filesystem.Params(); ok {
  1423  			pools.Add(params.Pool)
  1424  		} else {
  1425  			info, err := filesystem.Info()
  1426  			if err != nil {
  1427  				return nil, errors.Trace(err)
  1428  			}
  1429  			pools.Add(info.Pool)
  1430  		}
  1431  	}
  1432  	return pools, nil
  1433  }
  1434  
  1435  // validateDynamicMachineStoragePools validates that all of the specified
  1436  // storage pools support dynamic storage provisioning. If any provider doesn't
  1437  // support dynamic storage, then an IsNotSupported error is returned.
  1438  func validateDynamicMachineStoragePools(m *Machine, pools set.Strings) error {
  1439  	if pools.IsEmpty() {
  1440  		return nil
  1441  	}
  1442  	if m.ContainerType() != "" {
  1443  		// TODO(axw) consult storage providers to check if they
  1444  		// support adding storage to containers. Loop is fine,
  1445  		// for example.
  1446  		//
  1447  		// TODO(axw) later we might allow *any* storage, and
  1448  		// passthrough/bindmount storage. That would imply either
  1449  		// container creation time only, or requiring containers
  1450  		// to be restarted to pick up new configuration.
  1451  		return errors.NotSupportedf("adding storage to %s container", m.ContainerType())
  1452  	}
  1453  	return validateDynamicStoragePools(m.st, pools)
  1454  }
  1455  
  1456  // validateDynamicStoragePools validates that all of the specified storage
  1457  // pools' providers support dynamic storage provisioning. If any provider
  1458  // does not, an error satisfying errors.IsNotSupported is returned.
  1459  func validateDynamicStoragePools(st *State, pools set.Strings) error {
  1460  	for pool := range pools {
  1461  		providerType, provider, err := poolStorageProvider(st, pool)
  1462  		if err != nil {
  1463  			return errors.Trace(err)
  1464  		}
  1465  		if !provider.Dynamic() {
  1466  			return errors.NewNotSupported(nil, fmt.Sprintf(
  1467  				"%q storage provider does not support dynamic storage",
  1468  				providerType,
  1469  			))
  1470  		}
  1471  	}
  1472  	return nil
  1473  }
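
        // Callers in this file typically treat the not-supported case as "no
        // eligible machine" rather than a hard failure (see
        // assignToCleanMaybeEmptyMachineOps below); a sketch of the pattern:
        //
        //    if err := validateDynamicStoragePools(st, pools); err != nil {
        //        if errors.IsNotSupported(err) {
        //            return failure(noCleanMachines)
        //        }
        //        return errors.Trace(err)
        //    }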
  1474  
  1475  func assignContextf(err *error, unitName string, target string) {
  1476  	if *err != nil {
  1477  		*err = errors.Annotatef(*err,
  1478  			"cannot assign unit %q to %s",
  1479  			unitName, target,
  1480  		)
  1481  	}
  1482  }
  1483  
  1484  // AssignToMachine assigns this unit to a given machine.
  1485  func (u *Unit) AssignToMachine(m *Machine) (err error) {
  1486  	defer assignContextf(&err, u.Name(), fmt.Sprintf("machine %s", m))
  1487  	return u.assignToMachine(m, false)
  1488  }
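
        // A minimal usage sketch, assuming u is a principal unit and st is the
        // relevant *State; the machine id "0" is a placeholder:
        //
        //    m, err := st.Machine("0")
        //    if err != nil {
        //        return errors.Trace(err)
        //    }
        //    if err := u.AssignToMachine(m); err != nil {
        //        return errors.Trace(err)
        //    }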
  1489  
  1490  // assignToNewMachineOps returns txn.Ops to assign the unit to a machine
  1491  // created according to the supplied params, with the supplied constraints.
  1492  func (u *Unit) assignToNewMachineOps(
  1493  	template MachineTemplate,
  1494  	parentId string,
  1495  	containerType instance.ContainerType,
  1496  ) (*Machine, []txn.Op, error) {
  1497  
  1498  	if u.Life() != Alive {
  1499  		return nil, nil, unitNotAliveErr
  1500  	}
  1501  	if u.doc.MachineId != "" {
  1502  		return nil, nil, alreadyAssignedErr
  1503  	}
  1504  
  1505  	template.principals = []string{u.doc.Name}
  1506  	template.Dirty = true
  1507  
  1508  	var (
  1509  		mdoc *machineDoc
  1510  		ops  []txn.Op
  1511  		err  error
  1512  	)
  1513  	switch {
  1514  	case parentId == "" && containerType == "":
  1515  		mdoc, ops, err = u.st.addMachineOps(template)
  1516  	case parentId == "":
  1517  		if containerType == "" {
  1518  			return nil, nil, errors.New("assignToNewMachine called without container type (should never happen)")
  1519  		}
  1520  		// The new parent machine is clean and only hosts units,
  1521  		// regardless of its child.
  1522  		parentParams := template
  1523  		parentParams.Jobs = []MachineJob{JobHostUnits}
  1524  		mdoc, ops, err = u.st.addMachineInsideNewMachineOps(template, parentParams, containerType)
  1525  	default:
  1526  		mdoc, ops, err = u.st.addMachineInsideMachineOps(template, parentId, containerType)
  1527  	}
  1528  	if err != nil {
  1529  		return nil, nil, err
  1530  	}
  1531  
  1532  	// Ensure the host machine is really clean.
  1533  	if parentId != "" {
  1534  		mparent, err := u.st.Machine(parentId)
  1535  		if err != nil {
  1536  			return nil, nil, err
  1537  		}
  1538  		if !mparent.Clean() {
  1539  			return nil, nil, machineNotCleanErr
  1540  		}
  1541  		containers, err := mparent.Containers()
  1542  		if err != nil {
  1543  			return nil, nil, err
  1544  		}
  1545  		if len(containers) > 0 {
  1546  			return nil, nil, machineNotCleanErr
  1547  		}
  1548  		parentDocId := u.st.docID(parentId)
  1549  		ops = append(ops, txn.Op{
  1550  			C:      machinesC,
  1551  			Id:     parentDocId,
  1552  			Assert: bson.D{{"clean", true}},
  1553  		}, txn.Op{
  1554  			C:      containerRefsC,
  1555  			Id:     parentDocId,
  1556  			Assert: bson.D{hasNoContainersTerm},
  1557  		})
  1558  	}
  1559  
  1560  	// The unit's subordinates must not change while we're
  1561  	// assigning it to a machine, to ensure machine storage
  1562  	// is created for subordinate units.
  1563  	subordinatesUnchanged := bson.D{{"subordinates", u.doc.Subordinates}}
  1564  	isUnassigned := bson.D{{"machineid", ""}}
  1565  	asserts := append(isAliveDoc, isUnassigned...)
  1566  	asserts = append(asserts, subordinatesUnchanged...)
  1567  
  1568  	ops = append(ops, txn.Op{
  1569  		C:      unitsC,
  1570  		Id:     u.doc.DocID,
  1571  		Assert: asserts,
  1572  		Update: bson.D{{"$set", bson.D{{"machineid", mdoc.Id}}}},
  1573  	},
  1574  		removeStagedAssignmentOp(u.doc.DocID),
  1575  	)
  1576  	return &Machine{u.st, *mdoc}, ops, nil
  1577  }
  1578  
  1579  // Constraints returns the unit's deployment constraints.
  1580  func (u *Unit) Constraints() (*constraints.Value, error) {
  1581  	cons, err := readConstraints(u.st, u.globalAgentKey())
  1582  	if errors.IsNotFound(err) {
  1583  		// Lack of constraints indicates lack of unit.
  1584  		return nil, errors.NotFoundf("unit")
  1585  	} else if err != nil {
  1586  		return nil, err
  1587  	}
  1588  	return &cons, nil
  1589  }
  1590  
  1591  // AssignToNewMachineOrContainer assigns the unit to a new machine,
  1592  // with constraints determined according to the service and
  1593  // model constraints at the time of unit creation. If a
  1594  // container is required, a clean, empty machine is needed on which
  1595  // to create the container. An existing clean, empty machine is
  1596  // searched for first; if none is found, a new one is created.
  1597  func (u *Unit) AssignToNewMachineOrContainer() (err error) {
  1598  	defer assignContextf(&err, u.Name(), "new machine or container")
  1599  	if u.doc.Principal != "" {
  1600  		return fmt.Errorf("unit is a subordinate")
  1601  	}
  1602  	cons, err := u.Constraints()
  1603  	if err != nil {
  1604  		return err
  1605  	}
  1606  	if !cons.HasContainer() {
  1607  		return u.AssignToNewMachine()
  1608  	}
  1609  
  1610  	// Find a clean, empty machine on which to create a container.
  1611  	hostCons := *cons
  1612  	noContainer := instance.NONE
  1613  	hostCons.Container = &noContainer
  1614  	query, err := u.findCleanMachineQuery(true, &hostCons)
  1615  	if err != nil {
  1616  		return err
  1617  	}
  1618  	machinesCollection, closer := u.st.getCollection(machinesC)
  1619  	defer closer()
  1620  	var host machineDoc
  1621  	if err := machinesCollection.Find(query).One(&host); err == mgo.ErrNotFound {
  1622  		// No existing clean, empty machine so create a new one. The
  1623  		// container constraint will be used by AssignToNewMachine to
  1624  		// create the required container.
  1625  		return u.AssignToNewMachine()
  1626  	} else if err != nil {
  1627  		return err
  1628  	}
  1629  
  1630  	var m *Machine
  1631  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1632  		var err error
  1633  		u := u // don't change outer var
  1634  		if attempt > 0 {
  1635  			u, err = u.st.Unit(u.Name())
  1636  			if err != nil {
  1637  				return nil, errors.Trace(err)
  1638  			}
  1639  		}
  1640  		template := MachineTemplate{
  1641  			Series:      u.doc.Series,
  1642  			Constraints: *cons,
  1643  			Jobs:        []MachineJob{JobHostUnits},
  1644  		}
  1645  		var ops []txn.Op
  1646  		m, ops, err = u.assignToNewMachineOps(template, host.Id, *cons.Container)
  1647  		return ops, err
  1648  	}
  1649  	if err := u.st.run(buildTxn); err != nil {
  1650  		if errors.Cause(err) == machineNotCleanErr {
  1651  			// The clean machine was used before we got a chance
  1652  			// to use it so just stick the unit on a new machine.
  1653  			return u.AssignToNewMachine()
  1654  		}
  1655  		return errors.Trace(err)
  1656  	}
  1657  	u.doc.MachineId = m.doc.Id
  1658  	return nil
  1659  }
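
        // Usage sketch: with a container constraint (e.g. lxd) in effect for
        // the unit, this places the unit in a new container on an existing
        // clean, empty machine where possible, falling back to a new machine
        // otherwise:
        //
        //    if err := u.AssignToNewMachineOrContainer(); err != nil {
        //        return errors.Trace(err)
        //    }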
  1660  
  1661  // AssignToNewMachine assigns the unit to a new machine, with constraints
  1662  // determined according to the service and model constraints at the
  1663  // time of unit creation.
  1664  func (u *Unit) AssignToNewMachine() (err error) {
  1665  	defer assignContextf(&err, u.Name(), "new machine")
  1666  	if u.doc.Principal != "" {
  1667  		return fmt.Errorf("unit is a subordinate")
  1668  	}
  1669  	var m *Machine
  1670  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1671  		var err error
  1672  		u := u // don't change outer var
  1673  		if attempt > 0 {
  1674  			u, err = u.st.Unit(u.Name())
  1675  			if err != nil {
  1676  				return nil, errors.Trace(err)
  1677  			}
  1678  		}
  1679  		cons, err := u.Constraints()
  1680  		if err != nil {
  1681  			return nil, err
  1682  		}
  1683  		var containerType instance.ContainerType
  1684  		if cons.HasContainer() {
  1685  			containerType = *cons.Container
  1686  		}
  1687  		storageParams, err := u.machineStorageParams()
  1688  		if err != nil {
  1689  			return nil, errors.Trace(err)
  1690  		}
  1691  		template := MachineTemplate{
  1692  			Series:                u.doc.Series,
  1693  			Constraints:           *cons,
  1694  			Jobs:                  []MachineJob{JobHostUnits},
  1695  			Volumes:               storageParams.volumes,
  1696  			VolumeAttachments:     storageParams.volumeAttachments,
  1697  			Filesystems:           storageParams.filesystems,
  1698  			FilesystemAttachments: storageParams.filesystemAttachments,
  1699  		}
  1700  		// Get the ops necessary to create a new machine, and the
  1701  		// machine doc that will be added with those operations
  1702  		// (which includes the machine id).
  1703  		var ops []txn.Op
  1704  		m, ops, err = u.assignToNewMachineOps(template, "", containerType)
  1705  		return ops, err
  1706  	}
  1707  	if err := u.st.run(buildTxn); err != nil {
  1708  		return errors.Trace(err)
  1709  	}
  1710  	u.doc.MachineId = m.doc.Id
  1711  	return nil
  1712  }
  1713  
  1714  type byStorageInstance []StorageAttachment
  1715  
  1716  func (b byStorageInstance) Len() int      { return len(b) }
  1717  func (b byStorageInstance) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
  1718  
  1719  func (b byStorageInstance) Less(i, j int) bool {
  1720  	return b[i].StorageInstance().String() < b[j].StorageInstance().String()
  1721  }
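
        // For example (a sketch), sorting orders attachments lexically by the
        // string form of their storage tags, so "storage-data-0" sorts before
        // "storage-data-1":
        //
        //    sort.Sort(byStorageInstance(storageAttachments))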
  1722  
  1723  // machineStorageParams returns parameters for creating volumes/filesystems
  1724  // and volume/filesystem attachments for a machine that the unit will be
  1725  // assigned to.
  1726  func (u *Unit) machineStorageParams() (*machineStorageParams, error) {
  1727  	params, err := unitMachineStorageParams(u)
  1728  	if err != nil {
  1729  		return nil, errors.Trace(err)
  1730  	}
  1731  	for _, name := range u.doc.Subordinates {
  1732  		sub, err := u.st.Unit(name)
  1733  		if err != nil {
  1734  			return nil, errors.Trace(err)
  1735  		}
  1736  		subParams, err := unitMachineStorageParams(sub)
  1737  		if err != nil {
  1738  			return nil, errors.Trace(err)
  1739  		}
  1740  		params = combineMachineStorageParams(params, subParams)
  1741  	}
  1742  	return params, nil
  1743  }
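
        // A sketch of the combination step: if the principal unit requires a
        // volume and a subordinate requires a filesystem, then
        // combineMachineStorageParams merges both into a single
        // machineStorageParams value, so one machine provisioning request can
        // cover the whole unit group.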
  1744  
  1745  func unitMachineStorageParams(u *Unit) (*machineStorageParams, error) {
  1746  	storageAttachments, err := u.st.UnitStorageAttachments(u.UnitTag())
  1747  	if err != nil {
  1748  		return nil, errors.Annotate(err, "getting storage attachments")
  1749  	}
  1750  	curl := u.doc.CharmURL
  1751  	if curl == nil {
  1752  		var err error
  1753  		app, err := u.Application()
  1754  		if err != nil {
  1755  			return nil, errors.Trace(err)
  1756  		}
  1757  		curl, _ = app.CharmURL()
  1758  	}
  1759  	ch, err := u.st.Charm(curl)
  1760  	if err != nil {
  1761  		return nil, errors.Annotate(err, "getting charm")
  1762  	}
  1763  	allCons, err := u.StorageConstraints()
  1764  	if err != nil {
  1765  		return nil, errors.Annotatef(err, "getting storage constraints")
  1766  	}
  1767  
  1768  	// Sort storage attachments so the volume ids are consistent (for testing).
  1769  	sort.Sort(byStorageInstance(storageAttachments))
  1770  
  1771  	chMeta := ch.Meta()
  1772  
  1773  	var volumes []MachineVolumeParams
  1774  	var filesystems []MachineFilesystemParams
  1775  	volumeAttachments := make(map[names.VolumeTag]VolumeAttachmentParams)
  1776  	filesystemAttachments := make(map[names.FilesystemTag]FilesystemAttachmentParams)
  1777  	for _, storageAttachment := range storageAttachments {
  1778  		storage, err := u.st.StorageInstance(storageAttachment.StorageInstance())
  1779  		if err != nil {
  1780  			return nil, errors.Annotatef(err, "getting storage instance")
  1781  		}
  1782  		machineParams, err := machineStorageParamsForStorageInstance(
  1783  			u.st, chMeta, u.UnitTag(), u.Series(), allCons, storage,
  1784  		)
  1785  		if err != nil {
  1786  			return nil, errors.Trace(err)
  1787  		}
  1788  
  1789  		volumes = append(volumes, machineParams.volumes...)
  1790  		for k, v := range machineParams.volumeAttachments {
  1791  			volumeAttachments[k] = v
  1792  		}
  1793  
  1794  		filesystems = append(filesystems, machineParams.filesystems...)
  1795  		for k, v := range machineParams.filesystemAttachments {
  1796  			filesystemAttachments[k] = v
  1797  		}
  1798  	}
  1799  	result := &machineStorageParams{
  1800  		volumes,
  1801  		volumeAttachments,
  1802  		filesystems,
  1803  		filesystemAttachments,
  1804  	}
  1805  	return result, nil
  1806  }
  1807  
  1808  // machineStorageParamsForStorageInstance returns parameters for creating
  1809  // volumes/filesystems and volume/filesystem attachments for a machine that
  1810  // the unit will be assigned to. These parameters are based on a given storage
  1811  // instance.
  1812  func machineStorageParamsForStorageInstance(
  1813  	st *State,
  1814  	charmMeta *charm.Meta,
  1815  	unit names.UnitTag,
  1816  	series string,
  1817  	allCons map[string]StorageConstraints,
  1818  	storage StorageInstance,
  1819  ) (*machineStorageParams, error) {
  1820  
  1821  	charmStorage := charmMeta.Storage[storage.StorageName()]
  1822  
  1823  	var volumes []MachineVolumeParams
  1824  	var filesystems []MachineFilesystemParams
  1825  	volumeAttachments := make(map[names.VolumeTag]VolumeAttachmentParams)
  1826  	filesystemAttachments := make(map[names.FilesystemTag]FilesystemAttachmentParams)
  1827  
  1828  	switch storage.Kind() {
  1829  	case StorageKindBlock:
  1830  		volumeAttachmentParams := VolumeAttachmentParams{
  1831  			charmStorage.ReadOnly,
  1832  		}
  1833  		if unit == storage.Owner() {
  1834  			// The storage instance is owned by the unit, so we'll need
  1835  			// to create a volume.
  1836  			cons := allCons[storage.StorageName()]
  1837  			volumeParams := VolumeParams{
  1838  				storage: storage.StorageTag(),
  1839  				binding: storage.StorageTag(),
  1840  				Pool:    cons.Pool,
  1841  				Size:    cons.Size,
  1842  			}
  1843  			volumes = append(volumes, MachineVolumeParams{
  1844  				volumeParams, volumeAttachmentParams,
  1845  			})
  1846  		} else {
  1847  			// The storage instance is owned by the service, so there
  1848  			// should be a (shared) volume already, for which we will
  1849  			// just add an attachment.
  1850  			volume, err := st.StorageInstanceVolume(storage.StorageTag())
  1851  			if err != nil {
  1852  				return nil, errors.Annotatef(err, "getting volume for storage %q", storage.Tag().Id())
  1853  			}
  1854  			volumeAttachments[volume.VolumeTag()] = volumeAttachmentParams
  1855  		}
  1856  	case StorageKindFilesystem:
  1857  		location, err := filesystemMountPoint(charmStorage, storage.StorageTag(), series)
  1858  		if err != nil {
  1859  			return nil, errors.Annotatef(
  1860  				err, "getting filesystem mount point for storage %s",
  1861  				storage.StorageName(),
  1862  			)
  1863  		}
  1864  		filesystemAttachmentParams := FilesystemAttachmentParams{
  1865  			charmStorage.Location == "", // auto-generated location
  1866  			location,
  1867  			charmStorage.ReadOnly,
  1868  		}
  1869  		if unit == storage.Owner() {
  1870  			// The storage instance is owned by the unit, so we'll need
  1871  			// to create a filesystem.
  1872  			cons := allCons[storage.StorageName()]
  1873  			filesystemParams := FilesystemParams{
  1874  				storage: storage.StorageTag(),
  1875  				binding: storage.StorageTag(),
  1876  				Pool:    cons.Pool,
  1877  				Size:    cons.Size,
  1878  			}
  1879  			filesystems = append(filesystems, MachineFilesystemParams{
  1880  				filesystemParams, filesystemAttachmentParams,
  1881  			})
  1882  		} else {
  1883  			// The storage instance is owned by the service, so there
  1884  			// should be a (shared) filesystem already, for which we will
  1885  			// just add an attachment.
  1886  			filesystem, err := st.StorageInstanceFilesystem(storage.StorageTag())
  1887  			if err != nil {
  1888  				return nil, errors.Annotatef(err, "getting filesystem for storage %q", storage.Tag().Id())
  1889  			}
  1890  			filesystemAttachments[filesystem.FilesystemTag()] = filesystemAttachmentParams
  1891  		}
  1892  	default:
  1893  		return nil, errors.Errorf("invalid storage kind %v", storage.Kind())
  1894  	}
  1895  	result := &machineStorageParams{
  1896  		volumes,
  1897  		volumeAttachments,
  1898  		filesystems,
  1899  		filesystemAttachments,
  1900  	}
  1901  	return result, nil
  1902  }
  1903  
  1904  var noCleanMachines = errors.New("all eligible machines in use")
  1905  
  1906  // AssignToCleanMachine assigns u to a machine which is marked as clean. A machine
  1907  // is clean if it has never had any principal units assigned to it.
  1908  // If there are no clean machines besides any machine(s) running JobManageModel,
  1909  // an error is returned.
  1910  // This method does not take constraints into consideration when choosing a
  1911  // machine (lp:1161919).
  1912  func (u *Unit) AssignToCleanMachine() (m *Machine, err error) {
  1913  	return u.assignToCleanMaybeEmptyMachine(false)
  1914  }
  1915  
  1916  // AssignToCleanEmptyMachine assigns u to a machine which is marked as clean and is also
  1917  // not hosting any containers. A machine is clean if it has never had any principal units
  1918  // assigned to it. If there are no clean machines besides any machine(s) running JobManageModel,
  1919  // an error is returned.
  1920  // This method does not take constraints into consideration when choosing a
  1921  // machine (lp:1161919).
  1922  func (u *Unit) AssignToCleanEmptyMachine() (m *Machine, err error) {
  1923  	return u.assignToCleanMaybeEmptyMachine(true)
  1924  }
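
        // A hedged usage sketch of the usual fallback pattern: try a clean,
        // empty machine first and create a new machine when none is eligible:
        //
        //    if _, err := u.AssignToCleanEmptyMachine(); err != nil {
        //        if errors.Cause(err) != noCleanMachines {
        //            return errors.Trace(err)
        //        }
        //        if err := u.AssignToNewMachine(); err != nil {
        //            return errors.Trace(err)
        //        }
        //    }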
  1925  
  1926  var hasContainerTerm = bson.DocElem{
  1927  	"$and", []bson.D{
  1928  		{{"children", bson.D{{"$not", bson.D{{"$size", 0}}}}}},
  1929  		{{"children", bson.D{{"$exists", true}}}},
  1930  	}}
  1931  
  1932  var hasNoContainersTerm = bson.DocElem{
  1933  	"$or", []bson.D{
  1934  		{{"children", bson.D{{"$size", 0}}}},
  1935  		{{"children", bson.D{{"$exists", false}}}},
  1936  	}}
  1937  
  1938  // findCleanMachineQuery returns a Mongo query to find clean (and possibly empty) machines with
  1939  // characteristics matching the specified constraints.
  1940  func (u *Unit) findCleanMachineQuery(requireEmpty bool, cons *constraints.Value) (bson.D, error) {
  1941  	db, closer := u.st.newDB()
  1942  	defer closer()
  1943  	containerRefsCollection, closer := db.GetCollection(containerRefsC)
  1944  	defer closer()
  1945  
  1946  	// Select all machines that can accept principal units and are clean.
  1947  	var containerRefs []machineContainers
  1948  	// If we need empty machines, first build up a list of machine ids which have containers
  1949  	// so we can exclude those.
  1950  	if requireEmpty {
  1951  		err := containerRefsCollection.Find(bson.D{hasContainerTerm}).All(&containerRefs)
  1952  		if err != nil {
  1953  			return nil, err
  1954  		}
  1955  	}
  1956  	var machinesWithContainers = make([]string, len(containerRefs))
  1957  	for i, cref := range containerRefs {
  1958  		machinesWithContainers[i] = cref.Id
  1959  	}
  1960  	terms := bson.D{
  1961  		{"life", Alive},
  1962  		{"series", u.doc.Series},
  1963  		{"jobs", []MachineJob{JobHostUnits}},
  1964  		{"clean", true},
  1965  		{"machineid", bson.D{{"$nin", machinesWithContainers}}},
  1966  	}
  1967  	// Add the container filter term if necessary.
  1968  	var containerType instance.ContainerType
  1969  	if cons.Container != nil {
  1970  		containerType = *cons.Container
  1971  	}
  1972  	if containerType == instance.NONE {
  1973  		terms = append(terms, bson.DocElem{"containertype", ""})
  1974  	} else if containerType != "" {
  1975  		terms = append(terms, bson.DocElem{"containertype", string(containerType)})
  1976  	}
  1977  
  1978  	// Find the ids of machines which satisfy any required hardware
  1979  	// constraints. If there is no instanceData for a machine, that
  1980  	// machine is not considered as suitable for deploying the unit.
  1981  	// This can happen if the machine is not yet provisioned. It may
  1982  	// be that when the machine is provisioned it will be found to
  1983  	// be suitable, but we don't know that right now and it's best
  1984  	// to err on the side of caution and exclude such machines.
  1985  	var suitableInstanceData []instanceData
  1986  	var suitableTerms bson.D
  1987  	if cons.Arch != nil && *cons.Arch != "" {
  1988  		suitableTerms = append(suitableTerms, bson.DocElem{"arch", *cons.Arch})
  1989  	}
  1990  	if cons.Mem != nil && *cons.Mem > 0 {
  1991  		suitableTerms = append(suitableTerms, bson.DocElem{"mem", bson.D{{"$gte", *cons.Mem}}})
  1992  	}
  1993  	if cons.RootDisk != nil && *cons.RootDisk > 0 {
  1994  		suitableTerms = append(suitableTerms, bson.DocElem{"rootdisk", bson.D{{"$gte", *cons.RootDisk}}})
  1995  	}
  1996  	if cons.CpuCores != nil && *cons.CpuCores > 0 {
  1997  		suitableTerms = append(suitableTerms, bson.DocElem{"cpucores", bson.D{{"$gte", *cons.CpuCores}}})
  1998  	}
  1999  	if cons.CpuPower != nil && *cons.CpuPower > 0 {
  2000  		suitableTerms = append(suitableTerms, bson.DocElem{"cpupower", bson.D{{"$gte", *cons.CpuPower}}})
  2001  	}
  2002  	if cons.Tags != nil && len(*cons.Tags) > 0 {
  2003  		suitableTerms = append(suitableTerms, bson.DocElem{"tags", bson.D{{"$all", *cons.Tags}}})
  2004  	}
  2005  	if len(suitableTerms) > 0 {
  2006  		instanceDataCollection, closer := db.GetCollection(instanceDataC)
  2007  		defer closer()
  2008  		err := instanceDataCollection.Find(suitableTerms).Select(bson.M{"_id": 1}).All(&suitableInstanceData)
  2009  		if err != nil {
  2010  			return nil, err
  2011  		}
  2012  		var suitableIds = make([]string, len(suitableInstanceData))
  2013  		for i, m := range suitableInstanceData {
  2014  			suitableIds[i] = m.DocID
  2015  		}
  2016  		terms = append(terms, bson.DocElem{"_id", bson.D{{"$in", suitableIds}}})
  2017  	}
  2018  	return terms, nil
  2019  }
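
        // For illustration, a mem constraint with no container type yields a
        // query shaped roughly like the following (the series and ids are
        // example values, and the "_id" term appears only because a hardware
        // constraint such as mem is set):
        //
        //    bson.D{
        //        {"life", Alive},
        //        {"series", "xenial"},
        //        {"jobs", []MachineJob{JobHostUnits}},
        //        {"clean", true},
        //        {"machineid", bson.D{{"$nin", machinesWithContainers}}},
        //        {"_id", bson.D{{"$in", suitableIds}}},
        //    }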
  2020  
  2021  // assignToCleanMaybeEmptyMachine implements AssignToCleanMachine and AssignToCleanEmptyMachine.
  2022  // A 'machine' may be a machine instance or container depending on the service constraints.
  2023  func (u *Unit) assignToCleanMaybeEmptyMachine(requireEmpty bool) (*Machine, error) {
  2024  	var m *Machine
  2025  	buildTxn := func(attempt int) ([]txn.Op, error) {
  2026  		var err error
  2027  		u := u // don't change outer var
  2028  		if attempt > 0 {
  2029  			u, err = u.st.Unit(u.Name())
  2030  			if err != nil {
  2031  				return nil, errors.Trace(err)
  2032  			}
  2033  		}
  2034  		var ops []txn.Op
  2035  		m, ops, err = u.assignToCleanMaybeEmptyMachineOps(requireEmpty)
  2036  		return ops, err
  2037  	}
  2038  	if err := u.st.run(buildTxn); err != nil {
  2039  		return nil, errors.Trace(err)
  2040  	}
  2041  	u.doc.MachineId = m.doc.Id
  2042  	m.doc.Clean = false
  2043  	return m, nil
  2044  }
  2045  
  2046  func (u *Unit) assignToCleanMaybeEmptyMachineOps(requireEmpty bool) (_ *Machine, _ []txn.Op, err error) {
  2047  	failure := func(err error) (*Machine, []txn.Op, error) {
  2048  		return nil, nil, err
  2049  	}
  2050  
  2051  	context := "clean"
  2052  	if requireEmpty {
  2053  		context += ", empty"
  2054  	}
  2055  	context += " machine"
  2056  
  2057  	if u.doc.Principal != "" {
  2058  		err = fmt.Errorf("unit is a subordinate")
  2059  		assignContextf(&err, u.Name(), context)
  2060  		return failure(err)
  2061  	}
  2062  
  2063  	// If the required storage is not all dynamic, the unit
  2064  	// must be assigned to a new machine.
  2065  	storageParams, err := u.machineStorageParams()
  2066  	if err != nil {
  2067  		assignContextf(&err, u.Name(), context)
  2068  		return failure(err)
  2069  	}
  2070  	storagePools, err := machineStoragePools(u.st, storageParams)
  2071  	if err != nil {
  2072  		assignContextf(&err, u.Name(), context)
  2073  		return failure(err)
  2074  	}
  2075  	if err := validateDynamicStoragePools(u.st, storagePools); err != nil {
  2076  		if errors.IsNotSupported(err) {
  2077  			return failure(noCleanMachines)
  2078  		}
  2079  		assignContextf(&err, u.Name(), context)
  2080  		return failure(err)
  2081  	}
  2082  
  2083  	// Get the unit constraints to see what deployment requirements we have to adhere to.
  2084  	cons, err := u.Constraints()
  2085  	if err != nil {
  2086  		assignContextf(&err, u.Name(), context)
  2087  		return failure(err)
  2088  	}
  2089  	query, err := u.findCleanMachineQuery(requireEmpty, cons)
  2090  	if err != nil {
  2091  		assignContextf(&err, u.Name(), context)
  2092  		return failure(err)
  2093  	}
  2094  
  2095  	// Find all of the candidate machines, and the associated
  2096  	// instances for those that are provisioned. Units will be
  2097  	// distributed across the provisioned instances in preference
  2098  	// to unprovisioned machines.
  2099  	machinesCollection, closer := u.st.getCollection(machinesC)
  2100  	defer closer()
  2101  	var mdocs []*machineDoc
  2102  	if err := machinesCollection.Find(query).All(&mdocs); err != nil {
  2103  		assignContextf(&err, u.Name(), context)
  2104  		return failure(err)
  2105  	}
  2106  	var unprovisioned []*Machine
  2107  	var instances []instance.Id
  2108  	instanceMachines := make(map[instance.Id]*Machine)
  2109  	for _, mdoc := range mdocs {
  2110  		m := newMachine(u.st, mdoc)
  2111  		instance, err := m.InstanceId()
  2112  		if errors.IsNotProvisioned(err) {
  2113  			unprovisioned = append(unprovisioned, m)
  2114  		} else if err != nil {
  2115  			assignContextf(&err, u.Name(), context)
  2116  			return failure(err)
  2117  		} else {
  2118  			instances = append(instances, instance)
  2119  			instanceMachines[instance] = m
  2120  		}
  2121  	}
  2122  
  2123  	// Filter the list of instances that are suitable for
  2124  	// distribution, and then map them back to machines.
  2125  	//
  2126  	// TODO(axw) 2014-05-30 #1324904
  2127  	// Shuffle machines to reduce likelihood of collisions.
  2128  	// The partition of provisioned/unprovisioned machines
  2129  	// must be maintained.
  2130  	if instances, err = distributeUnit(u, instances); err != nil {
  2131  		assignContextf(&err, u.Name(), context)
  2132  		return failure(err)
  2133  	}
  2134  	machines := make([]*Machine, len(instances), len(instances)+len(unprovisioned))
  2135  	for i, instance := range instances {
  2136  		m, ok := instanceMachines[instance]
  2137  		if !ok {
  2138  			err := fmt.Errorf("invalid instance returned: %v", instance)
  2139  			assignContextf(&err, u.Name(), context)
  2140  			return failure(err)
  2141  		}
  2142  		machines[i] = m
  2143  	}
  2144  	machines = append(machines, unprovisioned...)
  2145  
  2146  	// TODO(axw) 2014-05-30 #1253704
  2147  	// We should not select a machine that is in the process
  2148  	// of being provisioned. There's no point asserting that
  2149  	// the machine hasn't been provisioned, as there'll still
  2150  	// be a period of time during which the machine may be
  2151  	// provisioned without the fact having yet been recorded
  2152  	// in state.
  2153  	for _, m := range machines {
  2154  		// Check that the unit storage is compatible with
  2155  		// the machine in question.
  2156  		if err := validateDynamicMachineStorageParams(m, storageParams); err != nil {
  2157  			if errors.IsNotSupported(err) {
  2158  				continue
  2159  			}
  2160  			assignContextf(&err, u.Name(), context)
  2161  			return failure(err)
  2162  		}
  2163  		ops, err := u.assignToMachineOps(m, true)
  2164  		if err == nil {
  2165  			return m, ops, nil
  2166  		}
  2167  		switch errors.Cause(err) {
  2168  		case inUseErr, machineNotAliveErr:
  2169  		default:
  2170  			assignContextf(&err, u.Name(), context)
  2171  			return failure(err)
  2172  		}
  2173  	}
  2174  	return failure(noCleanMachines)
  2175  }
  2176  
  2177  // UnassignFromMachine removes the assignment between this unit and the
  2178  // machine it's assigned to.
  2179  func (u *Unit) UnassignFromMachine() (err error) {
  2180  	// TODO check local machine id and add an assert that the
  2181  	// machine id is as expected.
  2182  	ops := []txn.Op{{
  2183  		C:      unitsC,
  2184  		Id:     u.doc.DocID,
  2185  		Assert: txn.DocExists,
  2186  		Update: bson.D{{"$set", bson.D{{"machineid", ""}}}},
  2187  	}}
  2188  	if u.doc.MachineId != "" {
  2189  		ops = append(ops, txn.Op{
  2190  			C:      machinesC,
  2191  			Id:     u.st.docID(u.doc.MachineId),
  2192  			Assert: txn.DocExists,
  2193  			Update: bson.D{{"$pull", bson.D{{"principals", u.doc.Name}}}},
  2194  		})
  2195  	}
  2196  	err = u.st.runTransaction(ops)
  2197  	if err != nil {
  2198  		return fmt.Errorf("cannot unassign unit %q from machine: %v", u, onAbort(err, errors.NotFoundf("machine")))
  2199  	}
  2200  	u.doc.MachineId = ""
  2201  	return nil
  2202  }
  2203  
  2204  // ActionSpecsByName is a map of action names to their respective ActionSpec.
  2205  type ActionSpecsByName map[string]charm.ActionSpec
  2206  
  2207  // AddAction adds a new Action of type name and using arguments payload to
  2208  // this Unit, and returns the enqueued Action. Note that the use of
  2209  // spec.InsertDefaults mutates payload.
  2210  func (u *Unit) AddAction(name string, payload map[string]interface{}) (Action, error) {
  2211  	if len(name) == 0 {
  2212  		return nil, errors.New("no action name given")
  2213  	}
  2214  
  2215  	// If the action is predefined inside juju, get the spec from the map.
  2216  	spec, ok := actions.PredefinedActionsSpec[name]
  2217  	if !ok {
  2218  		specs, err := u.ActionSpecs()
  2219  		if err != nil {
  2220  			return nil, err
  2221  		}
  2222  		spec, ok = specs[name]
  2223  		if !ok {
  2224  			return nil, errors.Errorf("action %q not defined on unit %q", name, u.Name())
  2225  		}
  2226  	}
  2227  	// Reject bad payloads before attempting to insert defaults.
  2228  	err := spec.ValidateParams(payload)
  2229  	if err != nil {
  2230  		return nil, err
  2231  	}
  2232  	payloadWithDefaults, err := spec.InsertDefaults(payload)
  2233  	if err != nil {
  2234  		return nil, err
  2235  	}
  2236  	return u.st.EnqueueAction(u.Tag(), name, payloadWithDefaults)
  2237  }
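
        // A hypothetical example of enqueuing an action; the action name and
        // payload keys are placeholders, not part of any real charm:
        //
        //    action, err := u.AddAction("backup", map[string]interface{}{
        //        "outfile": "/tmp/backup.tgz",
        //    })
        //    if err != nil {
        //        return errors.Trace(err)
        //    }
        //    unitLogger.Infof("enqueued action %s", action.Id())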
  2238  
  2239  // ActionSpecs gets the ActionSpec map for the Unit's charm.
  2240  func (u *Unit) ActionSpecs() (ActionSpecsByName, error) {
  2241  	none := ActionSpecsByName{}
  2242  	curl, _ := u.CharmURL()
  2243  	if curl == nil {
  2244  		// If unit charm URL is not yet set, fall back to service
  2245  		svc, err := u.Application()
  2246  		if err != nil {
  2247  			return none, err
  2248  		}
  2249  		curl, _ = svc.CharmURL()
  2250  		if curl == nil {
  2251  			return none, errors.Errorf("no URL set for application %q", svc.Name())
  2252  		}
  2253  	}
  2254  	ch, err := u.st.Charm(curl)
  2255  	if err != nil {
  2256  		return none, errors.Annotatef(err, "unable to get charm with URL %q", curl.String())
  2257  	}
  2258  	chActions := ch.Actions()
  2259  	if chActions == nil || len(chActions.ActionSpecs) == 0 {
  2260  		return none, errors.Errorf("no actions defined on charm %q", ch.String())
  2261  	}
  2262  	return chActions.ActionSpecs, nil
  2263  }
  2264  
  2265  // CancelAction removes a pending Action from the queue for this
  2266  // ActionReceiver and marks it as cancelled.
  2267  func (u *Unit) CancelAction(action Action) (Action, error) {
  2268  	return action.Finish(ActionResults{Status: ActionCancelled})
  2269  }
  2270  
  2271  // WatchActionNotifications starts and returns a StringsWatcher that
  2272  // notifies when actions with Id prefixes matching this Unit are added.
  2273  func (u *Unit) WatchActionNotifications() StringsWatcher {
  2274  	return u.st.watchEnqueuedActionsFilteredBy(u)
  2275  }
  2276  
  2277  // Actions returns a list of actions pending or completed for this unit.
  2278  func (u *Unit) Actions() ([]Action, error) {
  2279  	return u.st.matchingActions(u)
  2280  }
  2281  
  2282  // CompletedActions returns a list of actions that have finished for
  2283  // this unit.
  2284  func (u *Unit) CompletedActions() ([]Action, error) {
  2285  	return u.st.matchingActionsCompleted(u)
  2286  }
  2287  
  2288  // PendingActions returns a list of actions pending for this unit.
  2289  func (u *Unit) PendingActions() ([]Action, error) {
  2290  	return u.st.matchingActionsPending(u)
  2291  }
  2292  
  2293  // RunningActions returns a list of actions running on this unit.
  2294  func (u *Unit) RunningActions() ([]Action, error) {
  2295  	return u.st.matchingActionsRunning(u)
  2296  }
  2297  
  2298  // Resolve marks the unit as having had any previous state transition
  2299  // problems resolved, and informs the unit that it may attempt to
  2300  // reestablish normal workflow. If noretryHooks is true, previously
  2301  // failed hooks are skipped and execution continues as if they had
  2302  // succeeded; otherwise the failed hooks are reexecuted.
  2303  func (u *Unit) Resolve(noretryHooks bool) error {
  2304  	// We currently check agent status to see if a unit is
  2305  	// in error state. As the new Juju Health work is completed,
  2306  	// this will change to checking the unit status.
  2307  	statusInfo, err := u.Status()
  2308  	if err != nil {
  2309  		return err
  2310  	}
  2311  	if statusInfo.Status != status.Error {
  2312  		return errors.Errorf("unit %q is not in an error state", u)
  2313  	}
  2314  	mode := ResolvedRetryHooks
  2315  	if noretryHooks {
  2316  		mode = ResolvedNoHooks
  2317  	}
  2318  	return u.SetResolved(mode)
  2319  }
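
        // For example, to mark a unit so that its failed hooks are retried
        // (a sketch; u is assumed to currently have status.Error):
        //
        //    if err := u.Resolve(false); err != nil {
        //        return errors.Trace(err)
        //    }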
  2320  
  2321  // SetResolved marks the unit as having had any previous state transition
  2322  // problems resolved, and informs the unit that it may attempt to
  2323  // reestablish normal workflow. The resolved mode parameter informs
  2324  // whether to attempt to reexecute previous failed hooks or to continue
  2325  // as if they had succeeded before.
  2326  func (u *Unit) SetResolved(mode ResolvedMode) (err error) {
  2327  	defer errors.DeferredAnnotatef(&err, "cannot set resolved mode for unit %q", u)
  2328  	switch mode {
  2329  	case ResolvedRetryHooks, ResolvedNoHooks:
  2330  	default:
  2331  		return fmt.Errorf("invalid error resolution mode: %q", mode)
  2332  	}
  2333  	// TODO(fwereade): assert unit has error status.
  2334  	resolvedNotSet := bson.D{{"resolved", ResolvedNone}}
  2335  	ops := []txn.Op{{
  2336  		C:      unitsC,
  2337  		Id:     u.doc.DocID,
  2338  		Assert: append(notDeadDoc, resolvedNotSet...),
  2339  		Update: bson.D{{"$set", bson.D{{"resolved", mode}}}},
  2340  	}}
  2341  	if err := u.st.runTransaction(ops); err == nil {
  2342  		u.doc.Resolved = mode
  2343  		return nil
  2344  	} else if err != txn.ErrAborted {
  2345  		return err
  2346  	}
  2347  	if ok, err := isNotDead(u.st, unitsC, u.doc.DocID); err != nil {
  2348  		return err
  2349  	} else if !ok {
  2350  		return ErrDead
  2351  	}
  2352  	// For now, the only remaining assert is that resolved was unset.
  2353  	return fmt.Errorf("already resolved")
  2354  }
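
        // Usage sketch, skipping the failed hooks instead of retrying them:
        //
        //    if err := u.SetResolved(ResolvedNoHooks); err != nil {
        //        return errors.Trace(err)
        //    }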
  2355  
  2356  // ClearResolved removes any resolved setting on the unit.
  2357  func (u *Unit) ClearResolved() error {
  2358  	ops := []txn.Op{{
  2359  		C:      unitsC,
  2360  		Id:     u.doc.DocID,
  2361  		Assert: txn.DocExists,
  2362  		Update: bson.D{{"$set", bson.D{{"resolved", ResolvedNone}}}},
  2363  	}}
  2364  	err := u.st.runTransaction(ops)
  2365  	if err != nil {
  2366  		return fmt.Errorf("cannot clear resolved mode for unit %q: %v", u, errors.NotFoundf("unit"))
  2367  	}
  2368  	u.doc.Resolved = ResolvedNone
  2369  	return nil
  2370  }
  2371  
  2372  // StorageConstraints returns the unit's storage constraints.
  2373  func (u *Unit) StorageConstraints() (map[string]StorageConstraints, error) {
  2374  	if u.doc.CharmURL == nil {
  2375  		app, err := u.st.Application(u.doc.Application)
  2376  		if err != nil {
  2377  			return nil, errors.Trace(err)
  2378  		}
  2379  		return app.StorageConstraints()
  2380  	}
  2381  	key := applicationStorageConstraintsKey(u.doc.Application, u.doc.CharmURL)
  2382  	cons, err := readStorageConstraints(u.st, key)
  2383  	if errors.IsNotFound(err) {
  2384  		return nil, nil
  2385  	} else if err != nil {
  2386  		return nil, errors.Trace(err)
  2387  	}
  2388  	return cons, nil
  2389  }
  2390  
  2391  type addUnitOpsArgs struct {
  2392  	unitDoc            *unitDoc
  2393  	agentStatusDoc     statusDoc
  2394  	workloadStatusDoc  statusDoc
  2395  	workloadVersionDoc statusDoc
  2396  	meterStatusDoc     *meterStatusDoc
  2397  }
  2398  
  2399  // addUnitOps returns the operations required to add a unit to the units
  2400  // collection, along with all the associated expected other unit entries. This
  2401  // method is used by both the *Application.addUnitOpsWithCons method and the
  2402  // migration import code.
  2403  func addUnitOps(st *State, args addUnitOpsArgs) ([]txn.Op, error) {
  2404  	name := args.unitDoc.Name
  2405  	agentGlobalKey := unitAgentGlobalKey(name)
  2406  
  2407  	// TODO: consider the constraints op
  2408  	// TODO: consider storageOps
  2409  	prereqOps := []txn.Op{
  2410  		createStatusOp(st, unitGlobalKey(name), args.workloadStatusDoc),
  2411  		createStatusOp(st, agentGlobalKey, args.agentStatusDoc),
  2412  		createStatusOp(st, globalWorkloadVersionKey(name), args.workloadVersionDoc),
  2413  		createMeterStatusOp(st, agentGlobalKey, args.meterStatusDoc),
  2414  	}
  2415  
  2416  	// Freshly-created units will not have a charm URL set; migrated
  2417  	// ones will, and they need to maintain their refcounts. If we
  2418  	// relax the restrictions on migrating apps mid-upgrade, this
  2419  	// will need to be more sophisticated, because it might need to
  2420  	// create the settings doc.
  2421  	if curl := args.unitDoc.CharmURL; curl != nil {
  2422  		appName := args.unitDoc.Application
  2423  		charmRefOps, err := appCharmIncRefOps(st, appName, curl, false)
  2424  		if err != nil {
  2425  			return nil, errors.Trace(err)
  2426  		}
  2427  		prereqOps = append(prereqOps, charmRefOps...)
  2428  	}
  2429  
  2430  	return append(prereqOps, txn.Op{
  2431  		C:      unitsC,
  2432  		Id:     name,
  2433  		Assert: txn.DocMissing,
  2434  		Insert: args.unitDoc,
  2435  	}), nil
  2436  }
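
        // A hedged sketch of how a caller might run these ops; args is assumed
        // to be a fully populated addUnitOpsArgs value, and real callers
        // typically append further ops before running the transaction:
        //
        //    ops, err := addUnitOps(st, args)
        //    if err != nil {
        //        return errors.Trace(err)
        //    }
        //    if err := st.runTransaction(ops); err != nil {
        //        return errors.Trace(err)
        //    }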
  2437  
  2438  // HistoryGetter allows getting the status history based on some identifying key.
  2439  type HistoryGetter struct {
  2440  	st        *State
  2441  	globalKey string
  2442  }
  2443  
  2444  // StatusHistory implements status.StatusHistoryGetter.
  2445  func (g *HistoryGetter) StatusHistory(filter status.StatusHistoryFilter) ([]status.StatusInfo, error) {
  2446  	args := &statusHistoryArgs{
  2447  		st:        g.st,
  2448  		globalKey: g.globalKey,
  2449  		filter:    filter,
  2450  	}
  2451  	return statusHistory(args)
  2452  }
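
        // Usage sketch, assuming a getter scoped to a unit's global key; the
        // unit name and filter size are example values:
        //
        //    g := &HistoryGetter{st: st, globalKey: unitGlobalKey("mysql/0")}
        //    history, err := g.StatusHistory(status.StatusHistoryFilter{Size: 10})
        //    if err != nil {
        //        return errors.Trace(err)
        //    }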