github.com/juju/juju@v0.0.0-20240430160146-1752b71fcf00/state/storage.go

     1  // Copyright 2015 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package state
     5  
     6  import (
     7  	"fmt"
     8  	"sync"
     9  	"time"
    10  
    11  	"github.com/dustin/go-humanize"
    12  	"github.com/juju/charm/v12"
    13  	"github.com/juju/collections/set"
    14  	"github.com/juju/errors"
    15  	"github.com/juju/mgo/v3"
    16  	"github.com/juju/mgo/v3/bson"
    17  	"github.com/juju/mgo/v3/txn"
    18  	"github.com/juju/names/v5"
    19  	jujutxn "github.com/juju/txn/v3"
    20  
    21  	k8sconstants "github.com/juju/juju/caas/kubernetes/provider/constants"
    22  	"github.com/juju/juju/environs/config"
    23  	stateerrors "github.com/juju/juju/state/errors"
    24  	"github.com/juju/juju/storage"
    25  	"github.com/juju/juju/storage/poolmanager"
    26  	"github.com/juju/juju/storage/provider"
    27  )
    28  
    29  // StorageInstance represents the state of a unit or application-wide storage
    30  // instance in the model.
    31  type StorageInstance interface {
    32  	Entity
    33  
    34  	// StorageTag returns the tag for the storage instance.
    35  	StorageTag() names.StorageTag
    36  
    37  	// Kind returns the storage instance kind.
    38  	Kind() StorageKind
    39  
    40  	// Owner returns the tag of the application or unit that owns this storage
    41  	// instance, and a boolean indicating whether or not there is an owner.
    42  	//
    43  	// When a non-shared storage instance is detached from the unit, the
    44  	// storage instance's owner will be cleared, allowing it to be attached
    45  	// to another unit.
    46  	Owner() (names.Tag, bool)
    47  
    48  	// StorageName returns the name of the storage, as defined in the charm
    49  	// storage metadata. This does not uniquely identify storage instances,
    50  	// but identifies the group that the instances belong to.
    51  	StorageName() string
    52  
    53  	// Life reports whether the storage instance is Alive, Dying or Dead.
    54  	Life() Life
    55  
    56  	// Pool returns the name of the storage pool from which the storage
    57  	// instance has been or will be provisioned.
    58  	Pool() string
    59  }
    60  
    61  // StorageAttachment represents the state of a unit's attachment to a storage
    62  // instance. A non-shared storage instance will have a single attachment for
    63  // the storage instance's owning unit, whereas a shared storage instance will
    64  // have an attachment for each unit of the application owning the storage instance.
    65  type StorageAttachment interface {
    66  	// StorageInstance returns the tag of the corresponding storage
    67  	// instance.
    68  	StorageInstance() names.StorageTag
    69  
    70  	// Unit returns the tag of the corresponding unit.
    71  	Unit() names.UnitTag
    72  
    73  	// Life reports whether the storage attachment is Alive, Dying or Dead.
    74  	Life() Life
    75  }
    76  
    77  // StorageKind defines the type of a store: whether it is a block device
    78  // or a filesystem.
    79  type StorageKind int
    80  
    81  const (
    82  	StorageKindUnknown StorageKind = iota
    83  	StorageKindBlock
    84  	StorageKindFilesystem
    85  )
    86  
    87  // NewStorageBackend creates a backend for managing storage.
    88  func NewStorageBackend(st *State) (*storageBackend, error) {
    89  	// TODO(wallyworld) - we should be passing in a Model not a State
    90  	// (but need to move stuff off State first)
    91  	m, err := st.Model()
    92  	if err != nil {
    93  		return nil, errors.Trace(err)
    94  	}
    95  	sb := &storageBackend{
    96  		mb:              st,
    97  		settings:        NewStateSettings(st),
    98  		modelType:       m.Type(),
    99  		config:          m.ModelConfig,
   100  		application:     st.Application,
   101  		allApplications: st.AllApplications,
   102  		unit:            st.Unit,
   103  		machine:         st.Machine,
   104  	}
   105  	sb.registryInit = func() {
   106  		sb.spRegistry, sb.spRegistryErr = st.storageProviderRegistry()
   107  	}
   108  	return sb, nil
   109  }
   110  
   111  type storageBackend struct {
   112  	mb              modelBackend
   113  	config          func() (*config.Config, error)
   114  	application     func(string) (*Application, error)
   115  	allApplications func() ([]*Application, error)
   116  	unit            func(string) (*Unit, error)
   117  	machine         func(string) (*Machine, error)
   118  
   119  	modelType ModelType
   120  	settings  *StateSettings
   121  
   122  	spRegistry    storage.ProviderRegistry
   123  	spRegistryErr error
   124  	registryOnce  sync.Once
   125  	registryInit  func()
   126  }
   127  
   128  type storageInstance struct {
   129  	sb  *storageBackend
   130  	doc storageInstanceDoc
   131  }
   132  
   133  // String returns a human-readable string representing the type.
   134  func (k StorageKind) String() string {
   135  	switch k {
   136  	case StorageKindBlock:
   137  		return "block"
   138  	case StorageKindFilesystem:
   139  		return "filesystem"
   140  	default:
   141  		return "unknown"
   142  	}
   143  }
   144  
   145  // parseStorageKind is used by the migration code to go from the
   146  // string representation back to the enum.
   147  func parseStorageKind(value string) StorageKind {
   148  	switch value {
   149  	case "block":
   150  		return StorageKindBlock
   151  	case "filesystem":
   152  		return StorageKindFilesystem
   153  	default:
   154  		return StorageKindUnknown
   155  	}
   156  }
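// As a minimal illustration, String and parseStorageKind round-trip the enum
// through the same string form used by the migration code:
//
//	StorageKindBlock.String()        // "block"
//	parseStorageKind("filesystem")   // StorageKindFilesystem
//	parseStorageKind("other")        // StorageKindUnknown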
   157  
   158  func (s *storageInstance) Tag() names.Tag {
   159  	return s.StorageTag()
   160  }
   161  
   162  func (s *storageInstance) StorageTag() names.StorageTag {
   163  	return names.NewStorageTag(s.doc.Id)
   164  }
   165  
   166  func (s *storageInstance) Kind() StorageKind {
   167  	return s.doc.Kind
   168  }
   169  
   170  func (s *storageInstance) Owner() (names.Tag, bool) {
   171  	owner := s.maybeOwner()
   172  	return owner, owner != nil
   173  }
   174  
   175  func (s *storageInstance) maybeOwner() names.Tag {
   176  	if s.doc.Owner == "" {
   177  		return nil
   178  	}
   179  	tag, err := names.ParseTag(s.doc.Owner)
   180  	if err != nil {
   181  		// This should be impossible; we do not expose
   182  		// a means of modifying the owner tag.
   183  		panic(err)
   184  	}
   185  	return tag
   186  }
   187  
   188  func (s *storageInstance) StorageName() string {
   189  	return s.doc.StorageName
   190  }
   191  
   192  func (s *storageInstance) Life() Life {
   193  	return s.doc.Life
   194  }
   195  
   196  func (s *storageInstance) Pool() string {
   197  	return s.doc.Constraints.Pool
   198  }
   199  
   200  // entityStorageRefcountKey returns a key for refcounting charm storage
   201  // for a specific entity. Each time a storage instance is created, the
    202  // named store's refcount is incremented, and decremented when it is removed.
   203  func entityStorageRefcountKey(owner names.Tag, storageName string) string {
   204  	return fmt.Sprintf("storage#%s#%s", owner.String(), storageName)
   205  }
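// As an illustration (the owner tag and storage name are hypothetical), a unit
// owner "mysql/0" and the charm storage name "data" produce the refcount key:
//
//	entityStorageRefcountKey(names.NewUnitTag("mysql/0"), "data")
//	// => "storage#unit-mysql-0#data"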
   206  
   207  // storageInstanceDoc describes a charm storage instance.
   208  type storageInstanceDoc struct {
   209  	DocID     string `bson:"_id"`
   210  	ModelUUID string `bson:"model-uuid"`
   211  
   212  	Id              string                     `bson:"id"`
   213  	Kind            StorageKind                `bson:"storagekind"`
   214  	Life            Life                       `bson:"life"`
   215  	Releasing       bool                       `bson:"releasing,omitempty"`
   216  	Owner           string                     `bson:"owner,omitempty"`
   217  	StorageName     string                     `bson:"storagename"`
   218  	AttachmentCount int                        `bson:"attachmentcount"`
   219  	Constraints     storageInstanceConstraints `bson:"constraints"`
   220  }
   221  
   222  // storageInstanceConstraints contains a subset of StorageConstraints,
   223  // for a single storage instance.
   224  type storageInstanceConstraints struct {
   225  	Pool string `bson:"pool"`
   226  	Size uint64 `bson:"size"`
   227  }
   228  
   229  type storageAttachment struct {
   230  	doc storageAttachmentDoc
   231  }
   232  
   233  func (s *storageAttachment) StorageInstance() names.StorageTag {
   234  	return names.NewStorageTag(s.doc.StorageInstance)
   235  }
   236  
   237  func (s *storageAttachment) Unit() names.UnitTag {
   238  	return names.NewUnitTag(s.doc.Unit)
   239  }
   240  
   241  func (s *storageAttachment) Life() Life {
   242  	return s.doc.Life
   243  }
   244  
   245  // storageAttachmentDoc describes a unit's attachment to a charm storage
   246  // instance.
   247  type storageAttachmentDoc struct {
   248  	DocID     string `bson:"_id"`
   249  	ModelUUID string `bson:"model-uuid"`
   250  
   251  	Unit            string `bson:"unitid"`
   252  	StorageInstance string `bson:"storageid"`
   253  	Life            Life   `bson:"life"`
   254  }
   255  
   256  // newStorageInstanceId returns a unique storage instance name. The name
   257  // incorporates the storage name as defined in the charm storage metadata,
   258  // and a unique sequence number.
   259  func newStorageInstanceId(mb modelBackend, store string) (string, error) {
   260  	seq, err := sequence(mb, "stores")
   261  	if err != nil {
   262  		return "", errors.Trace(err)
   263  	}
   264  	return fmt.Sprintf("%s/%v", store, seq), nil
   265  }
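// For example (the sequence value is hypothetical), a store named "data" with
// the model-wide "stores" sequence currently at 3 yields:
//
//	newStorageInstanceId(mb, "data") // => "data/3", nil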
   266  
   267  func storageAttachmentId(unit string, storageInstanceId string) string {
   268  	return fmt.Sprintf("%s#%s", unitGlobalKey(unit), storageInstanceId)
   269  }
   270  
   271  func (sb *storageBackend) registry() (storage.ProviderRegistry, error) {
   272  	sb.registryOnce.Do(sb.registryInit)
   273  	return sb.spRegistry, sb.spRegistryErr
   274  }
   275  
   276  // StorageInstance returns the StorageInstance with the specified tag.
   277  func (sb *storageBackend) StorageInstance(tag names.StorageTag) (StorageInstance, error) {
   278  	s, err := sb.storageInstance(tag)
   279  	return s, err
   280  }
   281  
   282  func (sb *storageBackend) storageInstance(tag names.StorageTag) (*storageInstance, error) {
   283  	storageInstances, cleanup := sb.mb.db().GetCollection(storageInstancesC)
   284  	defer cleanup()
   285  
   286  	s := storageInstance{sb: sb}
   287  	err := storageInstances.FindId(tag.Id()).One(&s.doc)
   288  	if err == mgo.ErrNotFound {
   289  		return nil, errors.NotFoundf("storage instance %q", tag.Id())
   290  	} else if err != nil {
   291  		return nil, errors.Annotate(err, "cannot get storage instance details")
   292  	}
   293  	return &s, nil
   294  }
   295  
   296  // AllStorageInstances lists all storage instances currently in state
   297  // for this Juju model.
   298  func (sb *storageBackend) AllStorageInstances() ([]StorageInstance, error) {
   299  	storageInstances, err := sb.storageInstances(nil)
   300  	if err != nil {
   301  		return nil, errors.Trace(err)
   302  	}
   303  	out := make([]StorageInstance, len(storageInstances))
   304  	for i, s := range storageInstances {
   305  		out[i] = s
   306  	}
   307  	return out, nil
   308  }
   309  
    310  // RemoveStoragePool removes a pool only if it is not currently in use.
   311  func (sb *storageBackend) RemoveStoragePool(poolName string) error {
   312  	storageCollection, closer := sb.mb.db().GetCollection(storageInstancesC)
   313  	defer closer()
   314  
    315  	// TODO: Improve the data model to have a count of in-use pools so we can
    316  	// express these checks as asserts rather than queries.
   317  	var inUse bool
   318  	cfg, err := sb.config()
   319  	if err != nil {
   320  		return errors.Trace(err)
   321  	}
   322  	operatorStorage, ok := cfg.AllAttrs()[k8sconstants.OperatorStorageKey]
   323  	if sb.modelType == ModelTypeCAAS && ok && operatorStorage == poolName {
   324  		apps, err := sb.allApplications()
   325  		if err != nil {
   326  			return errors.Trace(err)
   327  		}
   328  		inUse = len(apps) > 0
   329  	} else {
   330  		query := bson.D{{"constraints.pool", bson.D{{"$eq", poolName}}}}
   331  		pools, err := storageCollection.Find(query).Count()
   332  		if err != nil {
   333  			return errors.Trace(err)
   334  		}
   335  		inUse = pools > 0
   336  	}
   337  	if inUse {
   338  		return errors.Errorf("storage pool %q in use", poolName)
   339  	}
   340  
   341  	registry, err := sb.registry()
   342  	if err != nil {
   343  		return errors.Trace(err)
   344  	}
   345  	pm := poolmanager.New(sb.settings, registry)
   346  	return pm.Delete(poolName)
   347  }
   348  
   349  func (sb *storageBackend) storageInstances(query bson.D) (storageInstances []*storageInstance, err error) {
   350  	storageCollection, closer := sb.mb.db().GetCollection(storageInstancesC)
   351  	defer closer()
   352  
   353  	sdocs := []storageInstanceDoc{}
   354  	err = storageCollection.Find(query).All(&sdocs)
   355  	if err != nil {
   356  		return nil, errors.Annotate(err, "cannot get storage instances")
   357  	}
   358  	for _, doc := range sdocs {
   359  		storageInstances = append(storageInstances, &storageInstance{sb, doc})
   360  	}
   361  	return storageInstances, nil
   362  }
   363  
   364  // DestroyStorageInstance ensures that the storage instance will be removed at
   365  // some point, after the cloud storage resources have been destroyed.
   366  //
   367  // If "destroyAttachments" is true, then DestroyStorageInstance will destroy
   368  // any attachments first; if there are no attachments, then the storage instance
    369  // is removed immediately. If "destroyAttachments" is instead false and there are
   370  // existing storage attachments, then DestroyStorageInstance will return an error
   371  // satisfying StorageAttachedError.
   372  func (sb *storageBackend) DestroyStorageInstance(tag names.StorageTag, destroyAttachments bool, force bool, maxWait time.Duration) (err error) {
   373  	defer errors.DeferredAnnotatef(&err, "cannot destroy storage %q", tag.Id())
   374  	return sb.destroyStorageInstance(tag, destroyAttachments, false, force, maxWait)
   375  }
   376  
   377  // ReleaseStorageInstance ensures that the storage instance will be removed at
   378  // some point, without destroying the cloud storage resources.
   379  //
    380  // If "destroyAttachments" is true, then ReleaseStorageInstance will destroy
   381  // any attachments first; if there are no attachments, then the storage instance
    382  // is removed immediately. If "destroyAttachments" is instead false and there are
   383  // existing storage attachments, then ReleaseStorageInstance will return an error
   384  // satisfying StorageAttachedError.
   385  func (sb *storageBackend) ReleaseStorageInstance(tag names.StorageTag, destroyAttachments bool, force bool, maxWait time.Duration) (err error) {
   386  	defer errors.DeferredAnnotatef(&err, "cannot release storage %q", tag.Id())
   387  	return sb.destroyStorageInstance(tag, destroyAttachments, true, force, maxWait)
   388  }
   389  
   390  func (sb *storageBackend) destroyStorageInstance(
   391  	tag names.StorageTag,
   392  	destroyAttachments bool,
   393  	releaseMachineStorage bool,
   394  	force bool,
   395  	maxWait time.Duration,
   396  ) (err error) {
   397  	buildTxn := func(attempt int) ([]txn.Op, error) {
   398  		s, err := sb.storageInstance(tag)
   399  		if errors.IsNotFound(err) && attempt > 0 {
   400  			// On the first attempt, we expect it to exist.
   401  			return nil, jujutxn.ErrNoOperations
   402  		} else if err != nil {
   403  			return nil, errors.Trace(err)
   404  		}
   405  		switch ops, err := sb.destroyStorageInstanceOps(s, destroyAttachments, releaseMachineStorage, force, maxWait); err {
   406  		case errAlreadyDying:
   407  			return nil, jujutxn.ErrNoOperations
   408  		case nil:
   409  			return ops, nil
   410  		default:
   411  			logger.Warningf("could not destroy storage instance %v: %v", tag.Id(), err)
   412  			return nil, errors.Trace(err)
   413  		}
   414  	}
   415  	return sb.mb.db().Run(buildTxn)
   416  }
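// The buildTxn closure above follows the retry contract used throughout this
// file: db().Run re-invokes it with an increasing attempt number when the
// transaction's assertions fail, and jujutxn.ErrNoOperations aborts cleanly.
// A minimal sketch of that shape (helper names are illustrative only):
//
//	buildTxn := func(attempt int) ([]txn.Op, error) {
//		doc, err := fetchCurrentState()
//		if errors.IsNotFound(err) && attempt > 0 {
//			// The document disappeared after the first attempt.
//			return nil, jujutxn.ErrNoOperations
//		} else if err != nil {
//			return nil, errors.Trace(err)
//		}
//		return opsAssertingWhatWasRead(doc), nil
//	}
//	return sb.mb.db().Run(buildTxn)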
   417  
   418  func (sb *storageBackend) destroyStorageInstanceOps(
   419  	s *storageInstance,
   420  	destroyAttachments bool,
   421  	releaseStorage bool,
   422  	force bool,
   423  	maxWait time.Duration,
   424  ) ([]txn.Op, error) {
   425  	if s.doc.Life == Dying {
   426  		if !force {
   427  			return nil, errAlreadyDying
   428  		}
   429  	}
   430  	lifeAssert := isAliveDoc
   431  	if force {
    432  		// Since we are force destroying, the life assert should match the storage instance's current life.
   433  		lifeAssert = bson.D{{"life", s.doc.Life}}
   434  	}
   435  	if s.doc.AttachmentCount == 0 {
   436  		// There are no attachments remaining, so we can
   437  		// remove the storage instance immediately.
   438  		hasNoAttachments := bson.D{{"attachmentcount", 0}}
   439  		assert := append(hasNoAttachments, lifeAssert...)
   440  		s.doc.Releasing = releaseStorage
   441  		return removeStorageInstanceOps(s, assert, force)
   442  	}
   443  	if !destroyAttachments {
   444  		// There are storage attachments, and we've been instructed
   445  		// not to destroy them.
   446  		return nil, stateerrors.StorageAttachedError
   447  	}
   448  
   449  	// Check that removing the storage from its owner (if any) is permitted.
   450  	owner := s.maybeOwner()
   451  	var validateRemoveOps []txn.Op
   452  	var ownerAssert bson.DocElem
   453  	if owner != nil {
   454  		var err error
   455  		validateRemoveOps, err = validateRemoveOwnerStorageInstanceOps(s)
   456  		if err != nil {
   457  			return nil, errors.Trace(err)
   458  		}
   459  		ownerAssert = bson.DocElem{"owner", owner.String()}
   460  	} else {
   461  		ownerAssert = bson.DocElem{"owner", bson.D{{"$exists", false}}}
   462  	}
   463  
   464  	if releaseStorage {
   465  		if err := checkStoragePoolReleasable(sb, s.Pool()); err != nil {
   466  			return nil, errors.Trace(err)
   467  		}
   468  	}
   469  
   470  	// There are still attachments: the storage instance will be removed
   471  	// when the last attachment is removed. We schedule a cleanup to destroy
   472  	// attachments.
   473  	notLastRefs := append(bson.D{{"attachmentcount", bson.D{{"$gt", 0}}}}, lifeAssert...)
   474  	setFields := bson.D{{"life", Dying}}
   475  	if releaseStorage {
   476  		setFields = append(setFields, bson.DocElem{"releasing", true})
   477  	}
   478  	update := bson.D{{"$set", setFields}}
   479  	ops := []txn.Op{
   480  		newCleanupOp(cleanupAttachmentsForDyingStorage, s.doc.Id, force, maxWait),
   481  	}
   482  	ops = append(ops, validateRemoveOps...)
   483  	ops = append(ops, txn.Op{
   484  		C:      storageInstancesC,
   485  		Id:     s.doc.Id,
   486  		Assert: append(notLastRefs, ownerAssert),
   487  		Update: update,
   488  	})
   489  	return ops, nil
   490  }
   491  
   492  func checkStoragePoolReleasable(im *storageBackend, pool string) error {
   493  	providerType, aProvider, _, err := poolStorageProvider(im, pool)
   494  	if err != nil {
   495  		return errors.Trace(err)
   496  	}
   497  	if !aProvider.Releasable() {
   498  		return errors.Errorf(
   499  			"storage provider %q does not support releasing storage",
   500  			providerType,
   501  		)
   502  	}
   503  	return nil
   504  }
   505  
   506  // removeStorageInstanceOps removes the storage instance with the given
   507  // tag from state, if the specified assertions hold true.
   508  func removeStorageInstanceOps(si *storageInstance, assert bson.D, force bool) ([]txn.Op, error) {
   509  	// Remove the storage instance document, ensuring the owner does not
   510  	// change from what's passed in.
   511  	owner := si.maybeOwner()
   512  	var ownerAssert bson.DocElem
   513  	if owner != nil {
   514  		ownerAssert = bson.DocElem{"owner", owner.String()}
   515  	} else {
   516  		ownerAssert = bson.DocElem{"owner", bson.D{{"$exists", false}}}
   517  	}
   518  	ops := []txn.Op{{
   519  		C:      storageInstancesC,
   520  		Id:     si.doc.Id,
   521  		Assert: append(assert, ownerAssert),
   522  		Remove: true,
   523  	}}
   524  	if owner != nil {
   525  		// Ensure that removing the storage will not violate the
   526  		// owner's charm storage requirements.
   527  		validateRemoveOps, err := validateRemoveOwnerStorageInstanceOps(si)
   528  		if err != nil {
   529  			if !force {
   530  				return nil, errors.Trace(err)
   531  			}
   532  			logger.Warningf("could not validate owner for storage instance %v during remove: %v", si.StorageTag().Id(), err)
   533  		}
   534  		ops = append(ops, validateRemoveOps...)
   535  
   536  		// Decrement the owner's count for the storage name, freeing
   537  		// up a slot for a new storage instance to be attached.
   538  		decrefOp, err := decrefEntityStorageOp(si.sb.mb, owner, si.StorageName())
   539  		if err != nil {
   540  			if !force {
   541  				return nil, errors.Trace(err)
   542  			}
   543  			logger.Warningf("could not decrement owner count for storage instance %v during remove: %v", si.StorageTag().Id(), err)
   544  		} else {
   545  			ops = append(ops, decrefOp)
   546  		}
   547  	}
   548  
   549  	machineStorageOp := func(c string, id string) txn.Op {
   550  		return txn.Op{
   551  			C:      c,
   552  			Id:     id,
   553  			Assert: bson.D{{"storageid", si.doc.Id}},
   554  			Update: bson.D{{"$unset", bson.D{{"storageid", nil}}}},
   555  		}
   556  	}
   557  
   558  	// Destroy any assigned volume/filesystem, and clear the storage
   559  	// reference to avoid a dangling pointer while the volume/filesystem
   560  	// is being destroyed.
   561  	var haveFilesystem bool
   562  	filesystem, err := si.sb.storageInstanceFilesystem(si.StorageTag())
   563  	if err == nil {
   564  		ops = append(ops, machineStorageOp(
   565  			filesystemsC, filesystem.Tag().Id(),
   566  		))
   567  		fsOps, err := destroyFilesystemOps(si.sb, filesystem, si.doc.Releasing, force, nil)
   568  		if err != nil {
   569  			if !force {
   570  				return nil, errors.Trace(err)
   571  			}
   572  			logger.Warningf("could not get operations to destroy filesystem %v when removing storage instance %v: %v", filesystem.FilesystemTag().Id(), si.StorageTag().Id(), err)
   573  		}
   574  		ops = append(ops, fsOps...)
   575  		haveFilesystem = true
   576  	} else if !errors.IsNotFound(err) {
   577  		if !force {
   578  			return nil, errors.Trace(err)
   579  		}
   580  		logger.Warningf("could not get filesystem when removing storage instance %v: %v", si.StorageTag().Id(), err)
   581  	}
   582  	volume, err := si.sb.storageInstanceVolume(si.StorageTag())
   583  	if err == nil {
   584  		ops = append(ops, machineStorageOp(
   585  			volumesC, volume.Tag().Id(),
   586  		))
   587  		// If the storage instance has a filesystem, it may also
   588  		// have a volume (i.e. for volume-backed filesystem). In
   589  		// this case, we want to destroy only the filesystem; when
   590  		// the filesystem is removed, the volume will be destroyed.
   591  		if !haveFilesystem {
   592  			volOps, err := destroyVolumeOps(si.sb, volume, si.doc.Releasing, force, nil)
   593  			if err != nil {
   594  				if !force {
   595  					return nil, errors.Trace(err)
   596  				}
   597  				logger.Warningf("could not get operations to destroy volume %v when removing storage instance %v: %v", volume.Tag().Id(), si.StorageTag().Id(), err)
   598  			}
   599  			ops = append(ops, volOps...)
   600  		}
   601  	} else if !errors.IsNotFound(err) {
   602  		if !force {
   603  			return nil, errors.Trace(err)
   604  		}
   605  		logger.Warningf("could not get volume when removing storage instance %v: %v", si.StorageTag().Id(), err)
   606  	}
   607  	return ops, nil
   608  }
   609  
   610  // validateRemoveOwnerStorageInstanceOps checks that the given storage
   611  // instance can be removed from its current owner, returning txn.Ops to
   612  // ensure the same in a transaction. If the owner is not alive, then charm
   613  // storage requirements are ignored.
   614  func validateRemoveOwnerStorageInstanceOps(si *storageInstance) ([]txn.Op, error) {
   615  	var ops []txn.Op
   616  	var charmMeta *charm.Meta
   617  	owner := si.maybeOwner()
   618  	switch owner.Kind() {
   619  	case names.ApplicationTagKind:
   620  		app, err := si.sb.application(owner.Id())
   621  		if err != nil {
   622  			return nil, errors.Trace(err)
   623  		}
   624  		if app.Life() != Alive {
   625  			return nil, nil
   626  		}
   627  		ch, _, err := app.Charm()
   628  		if err != nil {
   629  			return nil, errors.Trace(err)
   630  		}
   631  		charmMeta = ch.Meta()
   632  		ops = append(ops, txn.Op{
   633  			C:  applicationsC,
   634  			Id: app.Name(),
   635  			Assert: bson.D{
   636  				{"life", Alive},
   637  				{"charmurl", ch.URL()},
   638  			},
   639  		})
   640  	case names.UnitTagKind:
   641  		u, err := si.sb.unit(owner.Id())
   642  		if err != nil {
   643  			return nil, errors.Trace(err)
   644  		}
   645  		if u.Life() != Alive {
   646  			return nil, nil
   647  		}
   648  		ch, err := u.charm()
   649  		if err != nil {
   650  			return nil, errors.Trace(err)
   651  		}
   652  		charmMeta = ch.Meta()
   653  		ops = append(ops, txn.Op{
   654  			C:      unitsC,
   655  			Id:     u.doc.Name,
   656  			Assert: bson.D{{"life", Alive}},
   657  		})
   658  		ops = append(ops, u.assertCharmOps(ch)...)
   659  	default:
   660  		return nil, errors.Errorf(
   661  			"invalid storage owner %s",
   662  			names.ReadableString(owner),
   663  		)
   664  	}
   665  	_, currentCountOp, err := validateStorageCountChange(
   666  		si.sb, owner, si.StorageName(), -1, charmMeta,
   667  	)
   668  	if err != nil {
   669  		return nil, errors.Trace(err)
   670  	}
   671  	ops = append(ops, currentCountOp)
   672  	return ops, nil
   673  }
   674  
   675  // validateStorageCountChange validates the desired storage count change,
   676  // and returns the current storage count, and a txn.Op that ensures the
   677  // current storage count does not change before the transaction is executed.
   678  func validateStorageCountChange(
   679  	im *storageBackend, owner names.Tag,
   680  	storageName string, n int,
   681  	charmMeta *charm.Meta,
   682  ) (current int, _ txn.Op, _ error) {
   683  	currentCountOp, currentCount, err := im.countEntityStorageInstances(owner, storageName)
   684  	if err != nil {
   685  		return -1, txn.Op{}, errors.Trace(err)
   686  	}
   687  	charmStorage := charmMeta.Storage[storageName]
   688  	if err := validateCharmStorageCountChange(charmStorage, currentCount, n); err != nil {
   689  		return -1, txn.Op{}, errors.Trace(err)
   690  	}
   691  	return currentCount, currentCountOp, nil
   692  }
   693  
   694  // increfEntityStorageOp returns a txn.Op that increments the reference
   695  // count for a storage instance for a given application or unit. This
   696  // should be called when creating a shared storage instance, or when
   697  // attaching a non-shared storage instance to a unit.
   698  func increfEntityStorageOp(mb modelBackend, owner names.Tag, storageName string, n int) (txn.Op, error) {
   699  	refcounts, closer := mb.db().GetCollection(refcountsC)
   700  	defer closer()
   701  	storageRefcountKey := entityStorageRefcountKey(owner, storageName)
   702  	incRefOp, err := nsRefcounts.CreateOrIncRefOp(refcounts, storageRefcountKey, n)
   703  	return incRefOp, errors.Trace(err)
   704  }
   705  
   706  // decrefEntityStorageOp returns a txn.Op that decrements the reference
   707  // count for a storage instance from a given application or unit. This
   708  // should be called when removing a shared storage instance, or when
   709  // detaching a non-shared storage instance from a unit.
   710  func decrefEntityStorageOp(mb modelBackend, owner names.Tag, storageName string) (txn.Op, error) {
   711  	refcounts, closer := mb.db().GetCollection(refcountsC)
   712  	defer closer()
   713  	storageRefcountKey := entityStorageRefcountKey(owner, storageName)
   714  	decRefOp, _, err := nsRefcounts.DyingDecRefOp(refcounts, storageRefcountKey)
   715  	if err != nil {
   716  		return txn.Op{}, errors.Trace(err)
   717  	}
   718  	return decRefOp, nil
   719  }
   720  
   721  // machineAssignable is used by createStorageOps to determine what machine
   722  // storage needs to be created. This is implemented by Unit.
   723  type machineAssignable interface {
   724  	machine() (*Machine, error)
   725  	noAssignedMachineOp() txn.Op
   726  }
   727  
   728  // createStorageOps returns txn.Ops for creating storage instances
   729  // and attachments for the newly created unit or application. A map
    730  // of storage names to the tags of the storage instances created will
   731  // be returned, along with the total number of storage attachments
   732  // made. These should be used to initialise or update refcounts.
   733  //
    734  // The entity tag identifies the entity that owns the storage instance,
    735  // either a unit or an application. Shared storage instances are owned by an
    736  // application, and non-shared storage instances are owned by a unit.
   737  //
   738  // The charm metadata corresponds to the charm that the owner (application/unit)
   739  // is or will be running, and is used to extract storage constraints,
   740  // default values, etc.
   741  //
   742  // The supplied storage constraints are constraints for the storage
   743  // instances to be created, keyed on the storage name. These constraints
   744  // will be correlated with the charm storage metadata for validation
   745  // and supplementing.
   746  //
    747  // maybeMachineAssignable may be nil, or a machineAssignable which
   748  // describes the entity's machine assignment. If the entity is assigned
   749  // to a machine, then machine storage will be created.
   750  func createStorageOps(
   751  	sb *storageBackend,
   752  	entityTag names.Tag,
   753  	charmMeta *charm.Meta,
   754  	cons map[string]StorageConstraints,
   755  	osname string,
   756  	maybeMachineAssignable machineAssignable,
   757  ) (ops []txn.Op, storageTags map[string][]names.StorageTag, numStorageAttachments int, err error) {
   758  
   759  	fail := func(err error) ([]txn.Op, map[string][]names.StorageTag, int, error) {
   760  		return nil, nil, -1, err
   761  	}
   762  
   763  	type template struct {
   764  		storageName string
   765  		meta        charm.Storage
   766  		cons        StorageConstraints
   767  	}
   768  
   769  	createdShared := false
   770  	switch entityTag := entityTag.(type) {
   771  	case names.ApplicationTag:
   772  		createdShared = true
   773  	case names.UnitTag:
   774  	default:
   775  		return fail(errors.Errorf("expected application or unit tag, got %T", entityTag))
   776  	}
   777  
   778  	// Create storage instances in order of name, to simplify testing.
   779  	storageNames := set.NewStrings()
   780  	for name := range cons {
   781  		storageNames.Add(name)
   782  	}
   783  
   784  	templates := make([]template, 0, len(cons))
   785  	for _, store := range storageNames.SortedValues() {
   786  		cons := cons[store]
   787  		charmStorage, ok := charmMeta.Storage[store]
   788  		if !ok {
   789  			return fail(errors.NotFoundf("charm storage %q", store))
   790  		}
   791  		if cons.Count == 0 {
   792  			continue
   793  		}
   794  		if createdShared != charmStorage.Shared {
   795  			// applications only get shared storage instances,
   796  			// units only get non-shared storage instances.
   797  			continue
   798  		}
   799  		templates = append(templates, template{
   800  			storageName: store,
   801  			meta:        charmStorage,
   802  			cons:        cons,
   803  		})
   804  	}
   805  
   806  	storageTags = make(map[string][]names.StorageTag)
   807  	ops = make([]txn.Op, 0, len(templates)*3)
   808  	for _, t := range templates {
   809  		owner := entityTag.String()
   810  		var kind StorageKind
   811  		switch t.meta.Type {
   812  		case charm.StorageBlock:
   813  			kind = StorageKindBlock
   814  		case charm.StorageFilesystem:
   815  			kind = StorageKindFilesystem
   816  		default:
   817  			return fail(errors.Errorf("unknown storage type %q", t.meta.Type))
   818  		}
   819  
   820  		for i := uint64(0); i < t.cons.Count; i++ {
   821  			cons := cons[t.storageName]
   822  			id, err := newStorageInstanceId(sb.mb, t.storageName)
   823  			if err != nil {
   824  				return fail(errors.Annotate(err, "cannot generate storage instance name"))
   825  			}
   826  			storageTag := names.NewStorageTag(id)
   827  			storageTags[t.storageName] = append(storageTags[t.storageName], storageTag)
   828  			doc := &storageInstanceDoc{
   829  				Id:          id,
   830  				Kind:        kind,
   831  				Owner:       owner,
   832  				StorageName: t.storageName,
   833  				Constraints: storageInstanceConstraints{
   834  					Pool: cons.Pool,
   835  					Size: cons.Size,
   836  				},
   837  			}
   838  			var hostStorageOps []txn.Op
   839  			if unitTag, ok := entityTag.(names.UnitTag); ok {
   840  				doc.AttachmentCount = 1
   841  				ops = append(ops, createStorageAttachmentOp(storageTag, unitTag))
   842  				numStorageAttachments++
   843  				storageInstance := &storageInstance{sb, *doc}
   844  
   845  				if maybeMachineAssignable != nil {
   846  					var err error
   847  					hostStorageOps, err = unitAssignedMachineStorageOps(
   848  						sb, charmMeta, osname,
   849  						storageInstance,
   850  						maybeMachineAssignable,
   851  					)
   852  					if err != nil {
   853  						return fail(errors.Annotatef(
   854  							err, "creating machine storage for storage %s", id,
   855  						))
   856  					}
   857  				}
   858  
   859  				// For CAAS models, we create the storage with the unit
   860  				// as there's no machine for the unit to be assigned to.
   861  				if sb.modelType == ModelTypeCAAS {
   862  					storageParams, err := storageParamsForStorageInstance(
   863  						sb, charmMeta, osname, storageInstance,
   864  					)
   865  					if err != nil {
   866  						return fail(errors.Trace(err))
   867  					}
   868  					// TODO(caas) - validate storage dynamic pools just in case
   869  					if hostStorageOps, _, _, err = sb.hostStorageOps(unitTag.Id(), storageParams); err != nil {
   870  						return fail(errors.Trace(err))
   871  					}
   872  				}
   873  			}
   874  			ops = append(ops, txn.Op{
   875  				C:      storageInstancesC,
   876  				Id:     id,
   877  				Assert: txn.DocMissing,
   878  				Insert: doc,
   879  			})
   880  			ops = append(ops, hostStorageOps...)
   881  		}
   882  	}
   883  
   884  	// TODO(axw) create storage attachments for each shared storage
   885  	// instance owned by the application.
   886  	//
   887  	// TODO(axw) prevent creation of shared storage after application
   888  	// creation, because the only sane time to add storage attachments
   889  	// is when units are added to said application.
   890  
   891  	return ops, storageTags, numStorageAttachments, nil
   892  }
   893  
   894  // unitAssignedMachineStorageOps returns ops for creating volumes, filesystems
   895  // and their attachments to the machine that the specified unit is assigned to,
   896  // corresponding to the specified storage instance.
   897  //
   898  // If the unit is not assigned to a machine, then ops will be returned to assert
   899  // this, and no error will be returned.
   900  func unitAssignedMachineStorageOps(
   901  	sb *storageBackend,
   902  	charmMeta *charm.Meta,
   903  	osname string,
   904  	storage *storageInstance,
   905  	machineAssignable machineAssignable,
   906  ) (ops []txn.Op, err error) {
   907  	m, err := machineAssignable.machine()
   908  	if err != nil {
   909  		if errors.IsNotAssigned(err) {
   910  			// The unit is not assigned to a machine; return
   911  			// txn.Op that ensures that this remains the case
   912  			// until the transaction is committed.
   913  			return []txn.Op{machineAssignable.noAssignedMachineOp()}, nil
   914  		}
   915  		return nil, errors.Trace(err)
   916  	}
   917  
   918  	storageParams, err := storageParamsForStorageInstance(
   919  		sb, charmMeta, osname, storage,
   920  	)
   921  	if err != nil {
   922  		return nil, errors.Trace(err)
   923  	}
   924  	if err := validateDynamicMachineStorageParams(m, storageParams); err != nil {
   925  		return nil, errors.Trace(err)
   926  	}
   927  	storageOps, volumeAttachments, filesystemAttachments, err := sb.hostStorageOps(
   928  		m.doc.Id, storageParams,
   929  	)
   930  	if err != nil {
   931  		return nil, errors.Trace(err)
   932  	}
   933  	attachmentOps, err := addMachineStorageAttachmentsOps(
   934  		m, volumeAttachments, filesystemAttachments,
   935  	)
   936  	if err != nil {
   937  		return nil, errors.Trace(err)
   938  	}
   939  	storageOps = append(storageOps, attachmentOps...)
   940  	return storageOps, nil
   941  }
   942  
   943  // createStorageAttachmentOps returns a txn.Op for creating a storage attachment.
   944  // The caller is responsible for updating the attachmentcount field of the storage
   945  // instance.
   946  func createStorageAttachmentOp(storage names.StorageTag, unit names.UnitTag) txn.Op {
   947  	return txn.Op{
   948  		C:      storageAttachmentsC,
   949  		Id:     storageAttachmentId(unit.Id(), storage.Id()),
   950  		Assert: txn.DocMissing,
   951  		Insert: &storageAttachmentDoc{
   952  			Unit:            unit.Id(),
   953  			StorageInstance: storage.Id(),
   954  		},
   955  	}
   956  }
   957  
   958  // StorageAttachments returns the StorageAttachments for the specified storage
   959  // instance.
   960  func (sb *storageBackend) StorageAttachments(storage names.StorageTag) ([]StorageAttachment, error) {
   961  	query := bson.D{{"storageid", storage.Id()}}
   962  	attachments, err := sb.storageAttachments(query)
   963  	if err != nil {
   964  		return nil, errors.Annotatef(err, "cannot get storage attachments for storage %s", storage.Id())
   965  	}
   966  	return attachments, nil
   967  }
   968  
   969  // UnitStorageAttachments returns the StorageAttachments for the specified unit.
   970  func (sb *storageBackend) UnitStorageAttachments(unit names.UnitTag) ([]StorageAttachment, error) {
   971  	query := bson.D{{"unitid", unit.Id()}}
   972  	attachments, err := sb.storageAttachments(query)
   973  	if err != nil {
   974  		return nil, errors.Annotatef(err, "cannot get storage attachments for unit %s", unit.Id())
   975  	}
   976  	return attachments, nil
   977  }
   978  
   979  func (sb *storageBackend) storageAttachments(query bson.D) ([]StorageAttachment, error) {
   980  	coll, closer := sb.mb.db().GetCollection(storageAttachmentsC)
   981  	defer closer()
   982  
   983  	var docs []storageAttachmentDoc
   984  	if err := coll.Find(query).All(&docs); err != nil {
   985  		return nil, err
   986  	}
   987  	storageAttachments := make([]StorageAttachment, len(docs))
   988  	for i, doc := range docs {
   989  		storageAttachments[i] = &storageAttachment{doc}
   990  	}
   991  	return storageAttachments, nil
   992  }
   993  
   994  // StorageAttachment returns the StorageAttachment with the specified tags.
   995  func (sb *storageBackend) StorageAttachment(storage names.StorageTag, unit names.UnitTag) (StorageAttachment, error) {
   996  	att, err := sb.storageAttachment(storage, unit)
   997  	if err != nil {
   998  		return nil, errors.Trace(err)
   999  	}
  1000  	return att, nil
  1001  }
  1002  
  1003  func (sb *storageBackend) storageAttachment(storage names.StorageTag, unit names.UnitTag) (*storageAttachment, error) {
  1004  	coll, closer := sb.mb.db().GetCollection(storageAttachmentsC)
  1005  	defer closer()
  1006  	var s storageAttachment
  1007  	err := coll.FindId(storageAttachmentId(unit.Id(), storage.Id())).One(&s.doc)
  1008  	if err == mgo.ErrNotFound {
  1009  		return nil, errors.NotFoundf("storage attachment %s:%s", storage.Id(), unit.Id())
  1010  	} else if err != nil {
  1011  		return nil, errors.Annotatef(err, "cannot get storage attachment %s:%s", storage.Id(), unit.Id())
  1012  	}
  1013  	return &s, nil
  1014  }
  1015  
  1016  // AttachStorage attaches storage to a unit, creating and attaching machine
  1017  // storage as necessary.
  1018  func (sb *storageBackend) AttachStorage(storage names.StorageTag, unit names.UnitTag) (err error) {
  1019  	defer errors.DeferredAnnotatef(&err,
  1020  		"cannot attach %s to %s",
  1021  		names.ReadableString(storage),
  1022  		names.ReadableString(unit),
  1023  	)
  1024  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1025  		si, err := sb.storageInstance(storage)
  1026  		if err != nil {
  1027  			return nil, errors.Trace(err)
  1028  		}
  1029  		u, err := sb.unit(unit.Id())
  1030  		if err != nil {
  1031  			return nil, errors.Trace(err)
  1032  		}
  1033  		if u.Life() != Alive {
  1034  			return nil, errors.New("unit not alive")
  1035  		}
  1036  		ch, err := u.charm()
  1037  		if err != nil {
  1038  			return nil, errors.Annotate(err, "getting charm")
  1039  		}
  1040  		ops, err := sb.attachStorageOps(si, u.UnitTag(), u.Base().OS, ch, u)
  1041  		if errors.IsAlreadyExists(err) {
  1042  			return nil, jujutxn.ErrNoOperations
  1043  		}
  1044  		if err != nil {
  1045  			return nil, errors.Trace(err)
  1046  		}
  1047  		if si.doc.Owner == "" {
  1048  			// The storage instance will be owned by the unit, so we
  1049  			// must increment the unit's refcount for the storage name.
  1050  			//
  1051  			// Make sure that we *can* assign another storage instance
  1052  			// to the unit.
  1053  			_, currentCountOp, err := validateStorageCountChange(
  1054  				sb, u.UnitTag(), si.StorageName(), 1, ch.Meta(),
  1055  			)
  1056  			if err != nil {
  1057  				return nil, errors.Trace(err)
  1058  			}
  1059  			incRefOp, err := increfEntityStorageOp(sb.mb, u.UnitTag(), si.StorageName(), 1)
  1060  			if err != nil {
  1061  				return nil, errors.Trace(err)
  1062  			}
  1063  			ops = append(ops, currentCountOp, incRefOp)
  1064  		}
  1065  		ops = append(ops, txn.Op{
  1066  			C:      unitsC,
  1067  			Id:     u.doc.Name,
  1068  			Assert: isAliveDoc,
  1069  			Update: bson.D{{"$inc", bson.D{{"storageattachmentcount", 1}}}},
  1070  		})
  1071  		ops = append(ops, u.assertCharmOps(ch)...)
  1072  		return ops, nil
  1073  	}
  1074  	return sb.mb.db().Run(buildTxn)
  1075  }
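// A typical call (tag values are illustrative) attaches an existing storage
// instance to a unit, creating any machine storage needed:
//
//	err := sb.AttachStorage(names.NewStorageTag("data/0"), names.NewUnitTag("mysql/0"))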
  1076  
  1077  // attachStorageOps returns txn.Ops to attach a storage instance to the
  1078  // specified unit. The caller must ensure that the unit is in a state
  1079  // to attach the storage (i.e. it is Alive, or is being created).
  1080  //
  1081  // The caller is responsible for incrementing the storage refcount for
  1082  // the unit/storage name.
  1083  func (sb *storageBackend) attachStorageOps(
  1084  	si *storageInstance,
  1085  	unitTag names.UnitTag,
  1086  	osName string,
  1087  	ch *Charm,
  1088  	maybeMachineAssignable machineAssignable,
  1089  ) ([]txn.Op, error) {
  1090  	if si.Life() != Alive {
  1091  		return nil, errors.New("storage not alive")
  1092  	}
  1093  	unitApplicationName, err := names.UnitApplication(unitTag.Id())
  1094  	if err != nil {
  1095  		return nil, errors.Trace(err)
  1096  	}
  1097  	if owner, ok := si.Owner(); ok {
  1098  		if owner == unitTag {
  1099  			return nil, errors.AlreadyExistsf("storage attachment %q on %q", si.StorageTag().Id(), unitTag.Id())
  1100  		}
  1101  		if owner.Id() != unitApplicationName {
  1102  			return nil, errors.Errorf(
  1103  				"cannot attach storage owned by %s to %s",
  1104  				names.ReadableString(owner),
  1105  				names.ReadableString(unitTag),
  1106  			)
  1107  		}
  1108  		if _, err := sb.storageAttachment(
  1109  			si.StorageTag(),
  1110  			unitTag,
  1111  		); err == nil {
  1112  			return nil, errors.AlreadyExistsf("storage attachment %q on %q", si.StorageTag().Id(), unitTag.Id())
  1113  		} else if !errors.IsNotFound(err) {
  1114  			return nil, errors.Trace(err)
  1115  		}
  1116  	} else {
  1117  		// TODO(axw) should we store the application name on the
  1118  		// storage, and restrict attaching to only units of that
  1119  		// application?
  1120  	}
  1121  
  1122  	// Check that the unit's charm declares storage with the storage
  1123  	// instance's storage name.
  1124  	charmMeta := ch.Meta()
  1125  	if _, ok := charmMeta.Storage[si.StorageName()]; !ok {
  1126  		return nil, errors.Errorf(
  1127  			"charm %s has no storage called %s",
  1128  			charmMeta.Name, si.StorageName(),
  1129  		)
  1130  	}
  1131  
  1132  	// Create a storage attachment doc, ensuring that the storage instance
  1133  	// owner does not change, and that both the storage instance and unit
  1134  	// are alive. Increment the attachment count on both storage instance
  1135  	// and unit, and update the owner of the storage instance if necessary.
  1136  	siUpdate := bson.D{{"$inc", bson.D{{"attachmentcount", 1}}}}
  1137  	siAssert := isAliveDoc
  1138  	if si.doc.Owner != "" {
  1139  		siAssert = append(siAssert, bson.DocElem{"owner", si.doc.Owner})
  1140  	} else {
  1141  		siAssert = append(siAssert, bson.DocElem{"owner", bson.D{{"$exists", false}}})
  1142  		siUpdate = append(siUpdate, bson.DocElem{
  1143  			"$set", bson.D{{"owner", unitTag.String()}},
  1144  		})
  1145  	}
  1146  	ops := []txn.Op{{
  1147  		C:      storageInstancesC,
  1148  		Id:     si.doc.Id,
  1149  		Assert: siAssert,
  1150  		Update: siUpdate,
  1151  	},
  1152  		createStorageAttachmentOp(si.StorageTag(), unitTag),
  1153  	}
  1154  
  1155  	if maybeMachineAssignable != nil {
  1156  		machineStorageOps, err := unitAssignedMachineStorageOps(
  1157  			sb, charmMeta, osName, si,
  1158  			maybeMachineAssignable,
  1159  		)
  1160  		if err != nil {
  1161  			return nil, errors.Trace(err)
  1162  		}
  1163  		ops = append(ops, machineStorageOps...)
  1164  	}
  1165  
  1166  	// Attach volumes and filesystems for reattached storage on CAAS.
  1167  	if sb.modelType == ModelTypeCAAS {
  1168  		storageParams, err := storageParamsForStorageInstance(
  1169  			sb, charmMeta, osName, si,
  1170  		)
  1171  		if err != nil {
  1172  			return nil, errors.Trace(err)
  1173  		}
   1174  		// We should never be creating these here, but clear them just to be sure.
  1175  		storageParams.filesystems = nil
  1176  		storageParams.volumes = nil
  1177  		hostStorageOps, _, _, err := sb.hostStorageOps(unitTag.Id(), storageParams)
  1178  		if err != nil {
  1179  			return nil, errors.Trace(err)
  1180  		}
  1181  		ops = append(ops, hostStorageOps...)
  1182  	}
  1183  
  1184  	return ops, nil
  1185  }
  1186  
  1187  // DestroyUnitStorageAttachments ensures that the existing storage
  1188  // attachments of the specified unit are removed at some point.
  1189  func (sb *storageBackend) DestroyUnitStorageAttachments(unit names.UnitTag) (err error) {
  1190  	defer errors.DeferredAnnotatef(&err, "cannot destroy unit %s storage attachments", unit.Id())
  1191  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1192  		attachments, err := sb.UnitStorageAttachments(unit)
  1193  		if err != nil {
  1194  			return nil, errors.Trace(err)
  1195  		}
  1196  		ops := make([]txn.Op, 0, len(attachments))
  1197  		for _, attachment := range attachments {
  1198  			if attachment.Life() != Alive {
  1199  				continue
  1200  			}
  1201  			ops = append(ops, detachStorageOps(
  1202  				attachment.StorageInstance(), unit,
  1203  			)...)
  1204  		}
  1205  		if len(ops) == 0 {
  1206  			return nil, jujutxn.ErrNoOperations
  1207  		}
  1208  		return ops, nil
  1209  	}
  1210  	return sb.mb.db().Run(buildTxn)
  1211  }
  1212  
  1213  // DetachStorage ensures that the storage attachment will be
  1214  // removed at some point.
  1215  func (sb *storageBackend) DetachStorage(storage names.StorageTag, unit names.UnitTag, force bool, maxWait time.Duration) (err error) {
  1216  	defer errors.DeferredAnnotatef(&err, "cannot destroy storage attachment %s:%s", storage.Id(), unit.Id())
  1217  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1218  		s, err := sb.storageAttachment(storage, unit)
  1219  		if errors.IsNotFound(err) && attempt > 0 {
  1220  			// On the first attempt, we expect it to exist.
  1221  			return nil, jujutxn.ErrNoOperations
  1222  		} else if err != nil {
  1223  			return nil, errors.Trace(err)
  1224  		}
  1225  		if s.doc.Life == Dying {
  1226  			return nil, jujutxn.ErrNoOperations
  1227  		}
  1228  		si, err := sb.storageInstance(storage)
  1229  		if err != nil {
  1230  			return nil, jujutxn.ErrNoOperations
  1231  		}
  1232  		var ops []txn.Op
  1233  		var ownerAssert bson.DocElem
  1234  		switch owner := si.maybeOwner(); owner {
  1235  		case nil:
  1236  			ownerAssert = bson.DocElem{Name: "owner", Value: bson.D{{"$exists", false}}}
  1237  		case unit:
  1238  			validateRemoveOps, err := validateRemoveOwnerStorageInstanceOps(si)
  1239  			if err != nil {
  1240  				return nil, errors.Trace(err)
  1241  			}
  1242  			ops = append(ops, validateRemoveOps...)
  1243  			fallthrough
  1244  		default:
  1245  			ownerAssert = bson.DocElem{Name: "owner", Value: si.doc.Owner}
  1246  		}
  1247  		ops = append(ops, txn.Op{
  1248  			C:      storageInstancesC,
  1249  			Id:     si.doc.Id,
  1250  			Assert: bson.D{ownerAssert},
  1251  		})
  1252  
  1253  		// Check if the unit is assigned to a machine, and if the
  1254  		// associated machine storage has been attached yet. If not,
  1255  		// we can short-circuit the removal of the storage attachment.
  1256  		var assert interface{}
  1257  		removeStorageAttachment := true
  1258  		u, err := sb.unit(unit.Id())
  1259  		if err != nil {
  1260  			return nil, errors.Trace(err)
  1261  		}
  1262  
  1263  		processAttachments := true
  1264  		var hostTag names.Tag = unit
  1265  		if u.ShouldBeAssigned() {
  1266  			machineId, err := u.AssignedMachineId()
  1267  			if errors.IsNotAssigned(err) {
  1268  				// The unit is not assigned to a machine, therefore
  1269  				// there can be no associated machine storage. It
  1270  				// is safe to remove.
  1271  				ops = append(ops, u.noAssignedMachineOp())
  1272  				processAttachments = false
  1273  			} else if err != nil {
  1274  				return nil, errors.Trace(err)
  1275  			}
  1276  			hostTag = names.NewMachineTag(machineId)
  1277  		}
  1278  		if processAttachments {
  1279  			volumeAttachment, filesystemAttachment, err := sb.storageHostAttachment(
  1280  				si, unit, hostTag,
  1281  			)
  1282  			if err != nil {
  1283  				return nil, errors.Trace(err)
  1284  			}
  1285  			if volumeAttachment != nil {
  1286  				var assert interface{}
  1287  				if _, err := volumeAttachment.Info(); err == nil {
  1288  					// The volume attachment has been provisioned,
  1289  					// so we cannot short-circuit the removal of
  1290  					// the storage attachment.
  1291  					removeStorageAttachment = false
  1292  					assert = txn.DocExists
  1293  				} else {
  1294  					assert = bson.D{{"info", bson.D{{"$exists", false}}}}
  1295  				}
  1296  				ops = append(ops, txn.Op{
  1297  					C: volumeAttachmentsC,
  1298  					Id: volumeAttachmentId(
  1299  						volumeAttachment.Host().Id(),
  1300  						volumeAttachment.Volume().Id(),
  1301  					),
  1302  					Assert: assert,
  1303  				})
  1304  			}
  1305  			if filesystemAttachment != nil {
  1306  				var assert interface{}
  1307  				if _, err := filesystemAttachment.Info(); err == nil {
  1308  					// The filesystem attachment has been provisioned,
  1309  					// so we cannot short-circuit the removal of
  1310  					// the storage attachment.
  1311  					removeStorageAttachment = false
  1312  					assert = txn.DocExists
  1313  				} else {
  1314  					assert = bson.D{{"info", bson.D{{"$exists", false}}}}
  1315  				}
  1316  				ops = append(ops, txn.Op{
  1317  					C: filesystemAttachmentsC,
  1318  					Id: filesystemAttachmentId(
  1319  						filesystemAttachment.Host().Id(),
  1320  						filesystemAttachment.Filesystem().Id(),
  1321  					),
  1322  					Assert: assert,
  1323  				})
  1324  			}
  1325  		}
  1326  		if removeStorageAttachment {
  1327  			// Short-circuit the removal of the storage attachment.
  1328  			return removeStorageAttachmentOps(sb, s, si, force, assert, ops...)
  1329  		}
  1330  		return append(ops, detachStorageOps(storage, unit)...), nil
  1331  	}
  1332  	return sb.mb.db().Run(buildTxn)
  1333  }
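// A typical call (tag values are illustrative) marks the attachment Dying
// without forcing and without a wait cap:
//
//	err := sb.DetachStorage(names.NewStorageTag("data/0"), names.NewUnitTag("mysql/0"), false, 0)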
  1334  
  1335  func detachStorageOps(storage names.StorageTag, unit names.UnitTag) []txn.Op {
  1336  	ops := []txn.Op{{
  1337  		C:      storageAttachmentsC,
  1338  		Id:     storageAttachmentId(unit.Id(), storage.Id()),
  1339  		Assert: isAliveDoc,
  1340  		Update: bson.D{{"$set", bson.D{{"life", Dying}}}},
  1341  	}}
  1342  	return ops
  1343  }
  1344  
  1345  func (sb *storageBackend) storageHostAttachment(
  1346  	si *storageInstance,
  1347  	unitTag names.UnitTag,
  1348  	hostTag names.Tag,
  1349  ) (VolumeAttachment, FilesystemAttachment, error) {
  1350  	switch si.Kind() {
  1351  	case StorageKindBlock:
  1352  		volume, err := sb.storageInstanceVolume(si.StorageTag())
  1353  		if err != nil {
  1354  			return nil, nil, errors.Trace(err)
  1355  		}
  1356  		att, err := sb.VolumeAttachment(hostTag, volume.VolumeTag())
  1357  		if err != nil {
  1358  			return nil, nil, errors.Trace(err)
  1359  		}
  1360  		return att, nil, nil
  1361  
  1362  	case StorageKindFilesystem:
  1363  		filesystem, err := sb.storageInstanceFilesystem(si.StorageTag())
  1364  		if err != nil {
  1365  			return nil, nil, errors.Trace(err)
  1366  		}
  1367  		att, err := sb.FilesystemAttachment(hostTag, filesystem.FilesystemTag())
  1368  		if err != nil {
  1369  			return nil, nil, errors.Trace(err)
  1370  		}
  1371  		return nil, att, nil
  1372  
  1373  	default:
  1374  		return nil, nil, errors.Errorf("unknown storage type %q", si.Kind())
  1375  	}
  1376  }
  1377  
  1378  // RemoveStorageAttachment removes the storage attachment from state, and may
  1379  // remove its storage instance as well, if the storage instance is Dying and
  1380  // no other references to it exist.
  1381  // It will fail if the storage attachment is not Dying.
  1382  func (sb *storageBackend) RemoveStorageAttachment(storage names.StorageTag, unit names.UnitTag, force bool) (err error) {
  1383  	defer errors.DeferredAnnotatef(&err, "cannot remove storage attachment %s:%s", storage.Id(), unit.Id())
  1384  	buildTxn := func(attempt int) ([]txn.Op, error) {
  1385  		s, err := sb.storageAttachment(storage, unit)
  1386  		if errors.IsNotFound(err) && attempt > 0 {
  1387  			// On the first attempt, we expect it to exist.
  1388  			return nil, jujutxn.ErrNoOperations
  1389  		} else if err != nil {
  1390  			return nil, errors.Trace(err)
  1391  		}
  1392  		if s.doc.Life != Dying {
  1393  			// TODO (anastasiamac 2019-04-05) We might want to ignore this when forcing...
  1394  			return nil, errors.New("storage attachment is not dying")
  1395  		}
  1396  		inst, err := sb.storageInstance(storage)
  1397  		if errors.IsNotFound(err) {
  1398  			// This implies that the attachment was removed
  1399  			// after the call to st.storageAttachment.
  1400  			return nil, jujutxn.ErrNoOperations
  1401  		} else if err != nil {
  1402  			return nil, errors.Trace(err)
  1403  		}
  1404  		ops, err := removeStorageAttachmentOps(sb, s, inst, force, bson.D{{"life", Dying}})
  1405  		if err != nil {
  1406  			return nil, errors.Trace(err)
  1407  		}
  1408  		return ops, nil
  1409  	}
  1410  	return sb.mb.db().Run(buildTxn)
  1411  }
  1412  
  1413  func removeStorageAttachmentOps(
  1414  	im *storageBackend,
  1415  	s *storageAttachment,
  1416  	si *storageInstance,
  1417  	force bool,
  1418  	assert interface{},
  1419  	baseOps ...txn.Op,
  1420  ) ([]txn.Op, error) {
  1421  	ops := append(baseOps, txn.Op{
  1422  		C:      storageAttachmentsC,
  1423  		Id:     storageAttachmentId(s.doc.Unit, s.doc.StorageInstance),
  1424  		Assert: assert,
  1425  		Remove: true,
  1426  	}, txn.Op{
  1427  		C:      unitsC,
  1428  		Id:     s.doc.Unit,
  1429  		Assert: txn.DocExists,
  1430  		Update: bson.D{{"$inc", bson.D{{"storageattachmentcount", -1}}}},
  1431  	})
  1432  	var siAssert interface{}
  1433  	siUpdate := bson.D{{"$inc", bson.D{{"attachmentcount", -1}}}}
  1434  	if si.doc.AttachmentCount == 1 {
  1435  		if si.doc.Life == Dying {
  1436  			// The storage instance is dying: no more attachments
  1437  			// can be added to the instance, so it can be removed.
  1438  			hasLastRef := bson.D{{"life", Dying}, {"attachmentcount", 1}}
  1439  			siOps, err := removeStorageInstanceOps(si, hasLastRef, force)
  1440  			if err != nil {
  1441  				if !force {
  1442  					return nil, errors.Trace(err)
  1443  				}
  1444  				logger.Warningf("could not determine operations for storage instance %v removal: %v", si.StorageTag().Id(), err)
  1445  			}
  1446  			return append(ops, siOps...), nil
  1447  		} else if si.doc.Owner == names.NewUnitTag(s.doc.Unit).String() {
  1448  			// Ensure that removing the storage will not violate the
  1449  			// unit's charm storage requirements.
  1450  			siAssert = bson.D{{"owner", si.doc.Owner}}
  1451  			validateRemoveOps, err := validateRemoveOwnerStorageInstanceOps(si)
  1452  			if err != nil {
  1453  				if !force {
  1454  					return nil, errors.Trace(err)
  1455  				}
  1456  				logger.Warningf("error validating owner for storage instance %v removal: %v", si.StorageTag().Id(), err)
  1457  			}
  1458  			ops = append(ops, validateRemoveOps...)
  1459  
  1460  			// Disown the storage instance, so it can be attached
  1461  			// to another unit/application.
  1462  			siUpdate = append(siUpdate, bson.DocElem{
  1463  				"$unset", bson.D{{"owner", nil}},
  1464  			})
  1465  			decrefOp, err := decrefEntityStorageOp(im.mb, s.Unit(), si.StorageName())
  1466  			if err != nil {
  1467  				if !force {
  1468  					return nil, errors.Trace(err)
  1469  				}
  1470  				logger.Warningf("could not decrease refcount for storage instance %v removal: %v", si.StorageTag().Id(), err)
  1471  			}
  1472  			ops = append(ops, decrefOp)
  1473  		}
  1474  	}
  1475  	decrefOp := txn.Op{
  1476  		C:      storageInstancesC,
  1477  		Id:     si.doc.Id,
  1478  		Assert: siAssert,
  1479  		Update: siUpdate,
  1480  	}
  1481  	if si.doc.Life == Alive {
  1482  		// This may be the last reference, but the storage instance is
  1483  		// still alive. The storage instance will be removed when its
  1484  		// Destroy method is called, if it has no attachments.
  1485  		decrefOp.Assert = bson.D{
  1486  			{"life", Alive},
  1487  			{"attachmentcount", bson.D{{"$gt", 0}}},
  1488  		}
  1489  	} else {
  1490  		// If it's not the last reference when we checked, we want to
  1491  		// allow for concurrent attachment removals but want to ensure
  1492  		// that we don't drop to zero without removing the storage
  1493  		// instance.
  1494  		decrefOp.Assert = bson.D{
  1495  			{"life", Dying},
  1496  			{"attachmentcount", bson.D{{"$gt", 1}}},
  1497  		}
  1498  	}
  1499  	ops = append(ops, decrefOp)
  1500  
  1501  	// If the storage instance has an associated volume or
  1502  	// filesystem, detach the volume/filesystem too.
  1503  	detachOps, err := im.detachStorageAttachmentOps(si, s.Unit(), force)
  1504  	if err != nil {
  1505  		if !force {
  1506  			return nil, errors.Trace(err)
  1507  		}
  1508  		logger.Warningf("could not determine operations to detach storage attachments for storage instance %v unit %v: %v", si.StorageTag().Id(), s.Unit().Id(), err)
  1509  	}
  1510  	ops = append(ops, detachOps...)
  1511  
  1512  	return ops, nil
  1513  }
  1514  
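        // detachStorageAttachmentOps returns the transaction operations to
        // detach the volume or filesystem backing the given storage instance
        // from the unit's host: the assigned machine for IAAS models, or the
        // unit itself for CAAS models. Storage that is non-detachable, not
        // Alive, or already removed yields no operations.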
  1515  func (sb *storageBackend) detachStorageAttachmentOps(si *storageInstance, unitTag names.UnitTag, force bool) ([]txn.Op, error) {
  1516  	unit, err := sb.unit(unitTag.Id())
  1517  	if err != nil {
  1518  		return nil, errors.Trace(err)
  1519  	}
  1520  
  1521  	var hostTag names.Tag = unitTag
  1522  	if sb.modelType == ModelTypeIAAS {
  1523  		machineId, err := unit.AssignedMachineId()
  1524  		if errors.IsNotAssigned(err) {
  1525  			return []txn.Op{unit.noAssignedMachineOp()}, nil
  1526  		} else if err != nil {
  1527  			return nil, errors.Trace(err)
  1528  		}
  1529  		hostTag = names.NewMachineTag(machineId)
  1530  	}
  1531  
  1532  	switch si.Kind() {
  1533  	case StorageKindBlock:
  1534  		volume, err := sb.storageInstanceVolume(si.StorageTag())
  1535  		if errors.IsNotFound(err) {
  1536  			// The volume has already been removed, so must have
  1537  			// already been detached.
  1538  			logger.Debugf("%s", err)
  1539  			return nil, nil
  1540  		} else if err != nil {
  1541  			return nil, errors.Trace(err)
  1542  		} else if !volume.Detachable() {
  1543  			// Non-detachable volumes are left attached to the
  1544  			// machine, since the only other option is to destroy
  1545  			// them. The user can remove them explicitly, or else
  1546  			// leave them to be removed along with the machine.
  1547  			logger.Debugf(
  1548  				"%s for %s is non-detachable",
  1549  				names.ReadableString(volume.Tag()),
  1550  				names.ReadableString(si.StorageTag()),
  1551  			)
  1552  			return nil, nil
  1553  		} else if volume.Life() != Alive {
  1554  			// The volume is not alive, so it is either already
  1555  			// detached or will soon be detached.
  1556  			logger.Debugf(
  1557  				"%s is %s",
  1558  				names.ReadableString(volume.Tag()),
  1559  				volume.Life(),
  1560  			)
  1561  			return nil, nil
  1562  		}
  1563  		att, err := sb.VolumeAttachment(hostTag, volume.VolumeTag())
  1564  		if errors.IsNotFound(err) {
  1565  			// Since the storage attachment is Dying, it is not
  1566  			// possible to create a volume attachment for the
  1567  			// machine, associated with the same storage.
  1568  			logger.Debugf("%s", err)
  1569  			return nil, nil
  1570  		} else if err != nil {
  1571  			return nil, errors.Trace(err)
  1572  		}
  1573  		if att.Life() != Alive {
  1574  			logger.Debugf(
  1575  				"%s is detaching from %s",
  1576  				names.ReadableString(volume.Tag()),
  1577  				names.ReadableString(hostTag),
  1578  			)
  1579  			return nil, nil
  1580  		}
  1581  
  1582  		if plans, err := sb.machineVolumeAttachmentPlans(hostTag, volume.VolumeTag()); err != nil {
  1583  			return nil, errors.Trace(err)
  1584  		} else {
  1585  			if len(plans) > 0 {
  1586  				return sb.detachVolumeAttachmentPlanOps(hostTag, volume.VolumeTag(), force)
  1587  			}
  1588  		}
  1589  		return sb.detachVolumeOps(hostTag, volume.VolumeTag(), force)
  1590  
  1591  	case StorageKindFilesystem:
  1592  		filesystem, err := sb.storageInstanceFilesystem(si.StorageTag())
  1593  		if errors.IsNotFound(err) {
  1594  			// The filesystem has already been removed, so must
  1595  			// have already been detached.
  1596  			logger.Debugf("%s", err)
  1597  			return nil, nil
  1598  		} else if err != nil {
  1599  			return nil, errors.Trace(err)
  1600  		} else if !filesystem.Detachable() {
  1601  			// Non-detachable filesystems are left attached to the
  1602  			// machine, since the only other option is to destroy
  1603  			// them. The user can remove them explicitly, or else
  1604  			// leave them to be removed along with the machine.
  1605  			logger.Debugf(
  1606  				"%s for %s is non-detachable",
  1607  				names.ReadableString(filesystem.Tag()),
  1608  				names.ReadableString(si.StorageTag()),
  1609  			)
  1610  			return nil, nil
  1611  		} else if filesystem.Life() != Alive {
  1612  			logger.Debugf(
  1613  				"%s is %s",
  1614  				names.ReadableString(filesystem.Tag()),
  1615  				filesystem.Life(),
  1616  			)
  1617  			return nil, nil
  1618  		}
  1619  		att, err := sb.FilesystemAttachment(hostTag, filesystem.FilesystemTag())
  1620  		if errors.IsNotFound(err) {
  1621  		// Since the storage attachment is Dying, it is not
  1622  		// possible to create a filesystem attachment for the
  1623  		// machine, associated with the same storage.
  1624  			logger.Debugf("%s", err)
  1625  			return nil, nil
  1626  		} else if err != nil {
  1627  			return nil, errors.Trace(err)
  1628  		}
  1629  		if att.Life() != Alive {
  1630  			logger.Debugf(
  1631  				"%s is detaching from %s",
  1632  				names.ReadableString(filesystem.Tag()),
  1633  				names.ReadableString(hostTag),
  1634  			)
  1635  			return nil, nil
  1636  		}
  1637  		return detachFilesystemOps(hostTag, filesystem.FilesystemTag()), nil
  1638  
  1639  	default:
  1640  		return nil, errors.Errorf("unknown storage type %q", si.Kind())
  1641  	}
  1642  }
  1643  
  1644  // removeStorageInstancesOps returns the transaction operations to remove all
  1645  // storage instances owned by the specified entity.
  1646  func removeStorageInstancesOps(im *storageBackend, owner names.Tag, force bool) ([]txn.Op, error) {
  1647  	coll, closer := im.mb.db().GetCollection(storageInstancesC)
  1648  	defer closer()
  1649  
  1650  	var docs []storageInstanceDoc
  1651  	err := coll.Find(bson.D{{"owner", owner.String()}}).Select(bson.D{{"id", true}}).All(&docs)
  1652  	if err != nil {
  1653  		return nil, errors.Annotatef(err, "cannot get storage instances for %s", owner)
  1654  	}
  1655  	ops := make([]txn.Op, 0, len(docs))
  1656  	var removalErr error
  1657  	for _, doc := range docs {
  1658  		si := &storageInstance{im, doc}
  1659  		storageInstanceOps, err := removeStorageInstanceOps(si, nil, force)
  1660  		if err != nil {
  1661  			removalErr = errors.Trace(err)
  1662  			logger.Warningf("error determining operations for storage instance %v removal: %v", si.StorageTag().Id(), err)
  1663  		}
  1664  		ops = append(ops, storageInstanceOps...)
  1665  	}
  1666  	if !force && removalErr != nil {
  1667  		return nil, removalErr
  1668  	}
  1669  	return ops, nil
  1670  }
  1671  
  1672  // storageConstraintsDoc contains storage constraints for an entity.
  1673  type storageConstraintsDoc struct {
  1674  	DocID       string                        `bson:"_id"`
  1675  	ModelUUID   string                        `bson:"model-uuid"`
  1676  	Constraints map[string]StorageConstraints `bson:"constraints"`
  1677  }
  1678  
  1679  // StorageConstraints contains the user-specified constraints for provisioning
  1680  // storage instances for an application unit.
  1681  type StorageConstraints struct {
  1682  	// Pool is the name of the storage pool from which to provision the
  1683  	// storage instances.
  1684  	Pool string `bson:"pool"`
  1685  
  1686  	// Size is the required size of the storage instances, in MiB.
  1687  	Size uint64 `bson:"size"`
  1688  
  1689  	// Count is the required number of storage instances.
  1690  	Count uint64 `bson:"count"`
  1691  }
  1692  
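        // createStorageConstraintsOp returns a txn.Op that inserts a new
        // storage constraints document under the given key, asserting that
        // no document with that key exists yet.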
  1693  func createStorageConstraintsOp(key string, cons map[string]StorageConstraints) txn.Op {
  1694  	return txn.Op{
  1695  		C:      storageConstraintsC,
  1696  		Id:     key,
  1697  		Assert: txn.DocMissing,
  1698  		Insert: &storageConstraintsDoc{
  1699  			Constraints: cons,
  1700  		},
  1701  	}
  1702  }
  1703  
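        // replaceStorageConstraintsOp returns a txn.Op that overwrites the
        // constraints in an existing storage constraints document, asserting
        // that the document exists.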
  1704  func replaceStorageConstraintsOp(key string, cons map[string]StorageConstraints) txn.Op {
  1705  	return txn.Op{
  1706  		C:      storageConstraintsC,
  1707  		Id:     key,
  1708  		Assert: txn.DocExists,
  1709  		Update: bson.D{{"$set", bson.D{{"constraints", cons}}}},
  1710  	}
  1711  }
  1712  
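        // removeStorageConstraintsOp returns a txn.Op that removes the
        // storage constraints document with the given key.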
  1713  func removeStorageConstraintsOp(key string) txn.Op {
  1714  	return txn.Op{
  1715  		C:      storageConstraintsC,
  1716  		Id:     key,
  1717  		Remove: true,
  1718  	}
  1719  }
  1720  
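        // readStorageConstraints returns the storage constraints stored
        // under the given key, or a not-found error if no such document
        // exists.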
  1721  func readStorageConstraints(mb modelBackend, key string) (map[string]StorageConstraints, error) {
  1722  	coll, closer := mb.db().GetCollection(storageConstraintsC)
  1723  	defer closer()
  1724  
  1725  	var doc storageConstraintsDoc
  1726  	err := coll.FindId(key).One(&doc)
  1727  	if err == mgo.ErrNotFound {
  1728  		return nil, errors.NotFoundf("storage constraints for %q", key)
  1729  	}
  1730  	if err != nil {
  1731  		return nil, errors.Annotatef(err, "cannot get storage constraints for %q", key)
  1732  	}
  1733  	return doc.Constraints, nil
  1734  }
  1735  
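        // storageKind maps a charm storage type to the corresponding
        // storage.StorageKind, defaulting to StorageKindUnknown for
        // unrecognised types.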
  1736  func storageKind(storageType charm.StorageType) storage.StorageKind {
  1737  	kind := storage.StorageKindUnknown
  1738  	switch storageType {
  1739  	case charm.StorageBlock:
  1740  		kind = storage.StorageKindBlock
  1741  	case charm.StorageFilesystem:
  1742  		kind = storage.StorageKindFilesystem
  1743  	}
  1744  	return kind
  1745  }
  1746  
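        // validateStorageConstraints checks the given constraints against the
        // charm's storage metadata, and ensures that constraints are specified
        // for every store that the charm requires (CountMin > 0).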
  1747  func validateStorageConstraints(sb *storageBackend, allCons map[string]StorageConstraints, charmMeta *charm.Meta) error {
  1748  	err := validateStorageConstraintsAgainstCharm(sb, allCons, charmMeta)
  1749  	if err != nil {
  1750  		return errors.Trace(err)
  1751  	}
  1752  	// Ensure all stores have constraints specified. Defaults should have
  1753  	// been set by this point, if the user didn't specify constraints.
  1754  	for name, charmStorage := range charmMeta.Storage {
  1755  		if _, ok := allCons[name]; !ok && charmStorage.CountMin > 0 {
  1756  			return errors.Errorf("no constraints specified for store %q", name)
  1757  		}
  1758  	}
  1759  	return nil
  1760  }
  1761  
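        // validateStorageConstraintsAgainstCharm checks each set of constraints
        // against the corresponding store in the charm metadata: the store must
        // exist and must not be shared, and the count, minimum size and storage
        // pool must all be acceptable.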
  1762  func validateStorageConstraintsAgainstCharm(
  1763  	sb *storageBackend,
  1764  	allCons map[string]StorageConstraints,
  1765  	charmMeta *charm.Meta,
  1766  ) error {
  1767  	for name, cons := range allCons {
  1768  		charmStorage, ok := charmMeta.Storage[name]
  1769  		if !ok {
  1770  			return errors.Errorf("charm %q has no store called %q", charmMeta.Name, name)
  1771  		}
  1772  		if charmStorage.Shared {
  1773  			// TODO(axw) implement shared storage support.
  1774  			return errors.Errorf(
  1775  				"charm %q store %q: shared storage support not implemented",
  1776  				charmMeta.Name, name,
  1777  			)
  1778  		}
  1779  		if err := validateCharmStorageCount(charmStorage, cons.Count); err != nil {
  1780  			return errors.Annotatef(err, "charm %q store %q", charmMeta.Name, name)
  1781  		}
  1782  		if charmStorage.MinimumSize > 0 && cons.Size < charmStorage.MinimumSize {
  1783  			return errors.Errorf(
  1784  				"charm %q store %q: minimum storage size is %s, %s specified",
  1785  				charmMeta.Name, name,
  1786  				humanize.Bytes(charmStorage.MinimumSize*humanize.MByte),
  1787  				humanize.Bytes(cons.Size*humanize.MByte),
  1788  			)
  1789  		}
  1790  		kind := storageKind(charmStorage.Type)
  1791  		if err := validateStoragePool(sb, cons.Pool, kind, nil); err != nil {
  1792  			return err
  1793  		}
  1794  	}
  1795  	return nil
  1796  }
  1797  
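        // validateCharmStorageCountChange checks whether attaching (n > 0) or
        // detaching (n < 0) n storage instances would take the total count
        // outside the range permitted by the charm's storage metadata.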
  1798  func validateCharmStorageCountChange(charmStorage charm.Storage, current, n int) error {
  1799  	action := "attach"
  1800  	absn := n
  1801  	if n < 0 {
  1802  		action = "detach"
  1803  		absn = -absn
  1804  	}
  1805  	gerund := action + "ing"
  1806  	pluralise := ""
  1807  	if absn != 1 {
  1808  		pluralise = "s"
  1809  	}
  1810  
  1811  	count := uint64(current + n)
  1812  	if charmStorage.CountMin == 1 && charmStorage.CountMax == 1 && count != 1 {
  1813  		return errors.Errorf("cannot %s, storage is singular", action)
  1814  	}
  1815  	if count < uint64(charmStorage.CountMin) {
  1816  		return errors.Errorf(
  1817  			"%s %d storage instance%s brings the total to %d, "+
  1818  				"which is less than the minimum of %d",
  1819  			gerund, absn, pluralise, count,
  1820  			charmStorage.CountMin,
  1821  		)
  1822  	}
  1823  	if charmStorage.CountMax >= 0 && count > uint64(charmStorage.CountMax) {
  1824  		return errors.Errorf(
  1825  			"%s %d storage instance%s brings the total to %d, "+
  1826  				"exceeding the maximum of %d",
  1827  			gerund, absn, pluralise, count,
  1828  			charmStorage.CountMax,
  1829  		)
  1830  	}
  1831  	return nil
  1832  }
  1833  
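        // validateCharmStorageCount checks that the given storage instance
        // count falls within the range permitted by the charm's storage
        // metadata.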
  1834  func validateCharmStorageCount(charmStorage charm.Storage, count uint64) error {
  1835  	if charmStorage.CountMin == 1 && charmStorage.CountMax == 1 && count != 1 {
  1836  		return errors.Errorf("storage is singular, %d specified", count)
  1837  	}
  1838  	if count < uint64(charmStorage.CountMin) {
  1839  		return errors.Errorf(
  1840  			"%d instances required, %d specified",
  1841  			charmStorage.CountMin, count,
  1842  		)
  1843  	}
  1844  	if charmStorage.CountMax >= 0 && count > uint64(charmStorage.CountMax) {
  1845  		return errors.Errorf(
  1846  			"at most %d instances supported, %d specified",
  1847  			charmStorage.CountMax, count,
  1848  		)
  1849  	}
  1850  	return nil
  1851  }
  1852  
  1853  // validateStoragePool validates the storage pool for the model.
  1854  // If machineId is non-nil, the storage scope will be validated against
  1855  // the machineId; if the storage is not machine-scoped, then the machineId
  1856  // will be updated to "".
  1857  func validateStoragePool(
  1858  	sb *storageBackend, poolName string, kind storage.StorageKind, machineId *string,
  1859  ) error {
  1860  	if poolName == "" {
  1861  		return errors.New("pool name is required")
  1862  	}
  1863  	providerType, aProvider, poolConfig, err := poolStorageProvider(sb, poolName)
  1864  	if err != nil {
  1865  		return errors.Trace(err)
  1866  	}
  1867  
  1868  	// Ensure the storage provider supports the specified kind.
  1869  	kindSupported := aProvider.Supports(kind)
  1870  	if !kindSupported && kind == storage.StorageKindFilesystem {
  1871  		// Filesystems can be created if either filesystem
  1872  		// or block storage are supported. The scope of the
  1873  		// filesystem is the same as the backing volume.
  1874  		kindSupported = aProvider.Supports(storage.StorageKindBlock)
  1875  	}
  1876  	if !kindSupported {
  1877  		return errors.Errorf("%q provider does not support %q storage", providerType, kind)
  1878  	}
  1879  
  1880  	// Check the storage scope.
  1881  	if machineId != nil {
  1882  		switch aProvider.Scope() {
  1883  		case storage.ScopeMachine:
  1884  			if *machineId == "" {
  1885  				return errors.New("machine unspecified for machine-scoped storage")
  1886  			}
  1887  		default:
  1888  			// The storage is not machine-scoped, so we clear out
  1889  			// the machine ID to inform the caller that the storage
  1890  			// scope should be the model.
  1891  			*machineId = ""
  1892  		}
  1893  	}
  1895  	if sb.modelType == ModelTypeCAAS {
  1896  		if err := aProvider.ValidateForK8s(poolConfig); err != nil {
  1897  			return errors.Annotatef(err, "invalid storage config")
  1898  		}
  1899  	}
  1900  
  1901  	return nil
  1902  }
  1903  
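        // poolStorageProvider resolves the named storage pool to its provider
        // type, provider, and configuration attributes. If no pool with that
        // name exists, the name is interpreted as a provider type directly,
        // in which case the returned configuration is nil.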
  1904  func poolStorageProvider(sb *storageBackend, poolName string) (storage.ProviderType, storage.Provider, map[string]interface{}, error) {
  1905  	registry, err := sb.registry()
  1906  	if err != nil {
  1907  		return "", nil, nil, errors.Trace(err)
  1908  	}
  1909  	poolManager := poolmanager.New(sb.settings, registry)
  1910  	pool, err := poolManager.Get(poolName)
  1911  	if errors.IsNotFound(err) {
  1912  		// If there's no pool called poolName, maybe a provider type
  1913  		// has been specified directly.
  1914  		providerType := storage.ProviderType(poolName)
  1915  		aProvider, err1 := registry.StorageProvider(providerType)
  1916  		if err1 != nil {
  1917  			// The name can't be resolved as a storage provider type,
  1918  			// so return the original "pool not found" error.
  1919  			return "", nil, nil, errors.Trace(err)
  1920  		}
  1921  		return providerType, aProvider, nil, nil
  1922  	} else if err != nil {
  1923  		return "", nil, nil, errors.Trace(err)
  1924  	}
  1925  	providerType := pool.Provider()
  1926  	aProvider, err := registry.StorageProvider(providerType)
  1927  	if err != nil {
  1928  		return "", nil, nil, errors.Trace(err)
  1929  	}
  1930  	return providerType, aProvider, pool.Attrs(), nil
  1931  }
  1932  
  1933  // ErrNoDefaultStoragePool is returned when a storage pool is required but none
  1934  // is specified nor available as a default.
  1935  var ErrNoDefaultStoragePool = fmt.Errorf("no storage pool specified and no default available")
  1936  
  1937  // addDefaultStorageConstraints fills in default constraint values, replacing any empty/missing values
  1938  // in the specified constraints.
  1939  func addDefaultStorageConstraints(sb *storageBackend, allCons map[string]StorageConstraints, charmMeta *charm.Meta) error {
  1940  	conf, err := sb.config()
  1941  	if err != nil {
  1942  		return errors.Trace(err)
  1943  	}
  1944  
  1945  	for name, charmStorage := range charmMeta.Storage {
  1946  		cons, ok := allCons[name]
  1947  		if !ok {
  1948  			if charmStorage.Shared {
  1949  				// TODO(axw) get the model's default shared storage
  1950  				// pool, and create constraints here.
  1951  				return errors.Errorf(
  1952  					"no constraints specified for shared charm storage %q",
  1953  					name,
  1954  				)
  1955  			}
  1956  		}
  1957  		cons, err := storageConstraintsWithDefaults(sb.modelType, conf, charmStorage, name, cons)
  1958  		if err != nil {
  1959  			return errors.Trace(err)
  1960  		}
  1961  		// Replace in case pool or size were updated.
  1962  		allCons[name] = cons
  1963  	}
  1964  	return nil
  1965  }
  1966  
  1967  // storageConstraintsWithDefaults returns a copy of cons
  1968  // with any missing values replaced by defaults.
  1969  func storageConstraintsWithDefaults(
  1970  	modelType ModelType,
  1971  	cfg *config.Config,
  1972  	charmStorage charm.Storage,
  1973  	name string,
  1974  	cons StorageConstraints,
  1975  ) (StorageConstraints, error) {
  1976  	withDefaults := cons
  1977  
  1978  	// If no pool is specified, determine the pool from the env config and other constraints.
  1979  	if cons.Pool == "" {
  1980  		kind := storageKind(charmStorage.Type)
  1981  		poolName, err := defaultStoragePool(modelType, cfg, kind, cons)
  1982  		if err != nil {
  1983  			return withDefaults, errors.Annotatef(err, "finding default pool for %q storage", name)
  1984  		}
  1985  		withDefaults.Pool = poolName
  1986  	}
  1987  
  1988  	// If no size is specified, we default to the min size specified by the
  1989  	// charm, or 1GiB.
  1990  	if cons.Size == 0 {
  1991  		if charmStorage.MinimumSize > 0 {
  1992  			withDefaults.Size = charmStorage.MinimumSize
  1993  		} else {
  1994  			withDefaults.Size = 1024
  1995  		}
  1996  	}
  1997  	if cons.Count == 0 {
  1998  		withDefaults.Count = uint64(charmStorage.CountMin)
  1999  	}
  2000  	return withDefaults, nil
  2001  }
  2002  
  2003  // defaultStoragePool returns the default storage pool for the model.
  2004  // The default pool is either user specified, or one that is registered by the provider itself.
  2005  func defaultStoragePool(modelType ModelType, cfg *config.Config, kind storage.StorageKind, cons StorageConstraints) (string, error) {
  2006  	switch kind {
  2007  	case storage.StorageKindBlock:
  2008  		fallbackPool := string(provider.LoopProviderType)
  2009  		if modelType == ModelTypeCAAS {
  2010  			fallbackPool = string(k8sconstants.StorageProviderType)
  2011  		}
  2012  
  2013  		emptyConstraints := StorageConstraints{}
  2014  		if cons == emptyConstraints {
  2015  			// No constraints at all: use fallback.
  2016  			return fallbackPool, nil
  2017  		}
  2018  		// Size and/or count were specified: use the model's configured default block source.
  2019  		defaultPool, ok := cfg.StorageDefaultBlockSource()
  2020  		if !ok {
  2021  			defaultPool = fallbackPool
  2022  		}
  2023  		return defaultPool, nil
  2024  
  2025  	case storage.StorageKindFilesystem:
  2026  		fallbackPool := string(provider.RootfsProviderType)
  2027  		if modelType == ModelTypeCAAS {
  2028  			fallbackPool = string(k8sconstants.StorageProviderType)
  2029  		}
  2030  		emptyConstraints := StorageConstraints{}
  2031  		if cons == emptyConstraints {
  2032  			return fallbackPool, nil
  2033  		}
  2034  
  2035  		// If a filesystem source is specified in config,
  2036  		// use that; otherwise if a block source is specified,
  2037  		// use that and create a filesystem within.
  2038  		defaultPool, ok := cfg.StorageDefaultFilesystemSource()
  2039  		if !ok {
  2040  			defaultPool, ok = cfg.StorageDefaultBlockSource()
  2041  			if !ok {
  2042  				// No filesystem or block source,
  2043  				// so just use rootfs.
  2044  				defaultPool = fallbackPool
  2045  			}
  2046  		}
  2047  		return defaultPool, nil
  2048  	}
  2049  	return "", ErrNoDefaultStoragePool
  2050  }
  2051  
  2052  // AddStorageForUnit adds storage instances to the given unit as specified.
  2053  //
  2054  // Missing storage constraints are populated based on model defaults.
  2055  // The storage name is used to retrieve existing storage instances
  2056  // for that store, and the combination of existing and anticipated
  2057  // additional storage instances is validated against the store as
  2058  // specified in the charm.
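        //
        // An illustrative call, with hypothetical unit, store and pool names
        // (not taken from this package):
        //
        //	tags, err := sb.AddStorageForUnit(
        //		names.NewUnitTag("mysql/0"), "data",
        //		StorageConstraints{Pool: "ebs", Size: 1024, Count: 1},
        //	)
        //	if err != nil {
        //		return errors.Trace(err)
        //	}
        //	logger.Infof("added storage: %v", tags)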
  2059  func (sb *storageBackend) AddStorageForUnit(
  2060  	tag names.UnitTag, storageName string, cons StorageConstraints,
  2061  ) ([]names.StorageTag, error) {
  2062  	modelOp, err := sb.AddStorageForUnitOperation(tag, storageName, cons)
  2063  	if err != nil {
  2064  		return nil, errors.Trace(err)
  2065  	}
  2066  
  2067  	rawModelOp := modelOp.(*addStorageForUnitOperation)
  2068  	if err = sb.mb.db().Run(rawModelOp.Build); err != nil {
  2069  		return nil, errors.Trace(err)
  2070  	}
  2071  	return rawModelOp.tags, nil
  2072  }
  2073  
  2074  // AddStorageForUnitOperation returns a ModelOperation for adding storage
  2075  // instances to the given unit as specified.
  2076  //
  2077  // Missing storage constraints are populated based on model defaults.
  2078  // The storage name is used to retrieve existing storage instances
  2079  // for that store, and the combination of existing and anticipated
  2080  // additional storage instances is validated against the store as
  2081  // specified in the charm.
  2082  func (sb *storageBackend) AddStorageForUnitOperation(tag names.UnitTag, storageName string, cons StorageConstraints) (ModelOperation, error) {
  2083  	u, err := sb.unit(tag.Id())
  2084  	if err != nil {
  2085  		return nil, errors.Trace(err)
  2086  	}
  2087  
  2088  	return &addStorageForUnitOperation{
  2089  		sb:                 sb,
  2090  		u:                  u,
  2091  		storageName:        storageName,
  2092  		storageConstraints: cons,
  2093  	}, nil
  2094  }
  2095  
  2096  // addStorageForUnitOps returns the txn operations to add storage instances to the given unit as specified.
  2097  func (sb *storageBackend) addStorageForUnitOps(
  2098  	u *Unit,
  2099  	storageName string,
  2100  	cons StorageConstraints,
  2101  ) ([]names.StorageTag, []txn.Op, error) {
  2102  	if u.Life() != Alive {
  2103  		return nil, nil, unitNotAliveErr
  2104  	}
  2105  
  2106  	// Storage addition is based on the charm metadata; u.assertCharmOps
  2107  	// below returns txn.Ops that ensure the charm URL does not change
  2108  	// during the transaction.
  2109  	ch, err := u.charm()
  2110  	if err != nil {
  2111  		return nil, nil, errors.Trace(err)
  2112  	}
  2113  	charmMeta := ch.Meta()
  2114  	charmStorageMeta, ok := charmMeta.Storage[storageName]
  2115  	if !ok {
  2116  		return nil, nil, errors.NotFoundf("charm storage %q", storageName)
  2117  	}
  2118  	ops := u.assertCharmOps(ch)
  2119  
  2120  	if cons.Pool == "" || cons.Size == 0 {
  2121  		// Either pool or size, or both, were not specified. Take the
  2122  		// values from the unit's recorded storage constraints.
  2123  		allCons, err := u.StorageConstraints()
  2124  		if err != nil {
  2125  			return nil, nil, errors.Trace(err)
  2126  		}
  2127  		if uCons, ok := allCons[storageName]; ok {
  2128  			if cons.Pool == "" {
  2129  				cons.Pool = uCons.Pool
  2130  			}
  2131  			if cons.Size == 0 {
  2132  				cons.Size = uCons.Size
  2133  			}
  2134  		}
  2135  
  2136  		// Populate missing configuration parameters with defaults.
  2137  		if cons.Pool == "" || cons.Size == 0 {
  2138  			modelConfig, err := sb.config()
  2139  			if err != nil {
  2140  				return nil, nil, errors.Trace(err)
  2141  			}
  2142  			completeCons, err := storageConstraintsWithDefaults(
  2143  				sb.modelType,
  2144  				modelConfig,
  2145  				charmStorageMeta,
  2146  				storageName,
  2147  				cons,
  2148  			)
  2149  			if err != nil {
  2150  				return nil, nil, errors.Trace(err)
  2151  			}
  2152  			cons = completeCons
  2153  		}
  2154  	}
  2155  
  2156  	// This can happen for charm stores whose instance count range starts at 0,
  2157  	// when no count was specified in the storage constraints for this store at
  2158  	// deploy time, and no count was specified as a constraint to storage-add either.
  2159  	if cons.Count == 0 {
  2160  		return nil, nil, errors.NotValidf("adding storage where instance count is 0")
  2161  	}
  2162  
  2163  	tags, addUnitStorageOps, err := sb.addUnitStorageOps(charmMeta, u, storageName, cons, -1)
  2164  	if err != nil {
  2165  		return nil, nil, errors.Trace(err)
  2166  	}
  2167  	ops = append(ops, addUnitStorageOps...)
  2168  	return tags, ops, nil
  2169  }
  2170  
  2171  // addUnitStorageOps returns transaction ops to create storage for the given
  2172  // unit. If countMin is non-negative, the Count field of the constraints will
  2173  // be ignored, and as many storage instances as necessary to make up the
  2174  // shortfall will be created.
  2175  func (sb *storageBackend) addUnitStorageOps(
  2176  	charmMeta *charm.Meta,
  2177  	u *Unit,
  2178  	storageName string,
  2179  	cons StorageConstraints,
  2180  	countMin int,
  2181  ) ([]names.StorageTag, []txn.Op, error) {
  2182  	var ops []txn.Op
  2183  
  2184  	consTotal := cons
  2185  	if countMin < 0 {
  2186  		// Validate that the requested number of storage
  2187  		// instances can be added to the unit.
  2188  		currentCount, currentCountOp, err := validateStorageCountChange(
  2189  			sb, u.Tag(), storageName, int(cons.Count), charmMeta,
  2190  		)
  2191  		if err != nil {
  2192  			return nil, nil, errors.Trace(err)
  2193  		}
  2194  		ops = append(ops, currentCountOp)
  2195  		consTotal.Count += uint64(currentCount)
  2196  	} else {
  2197  		currentCountOp, currentCount, err := sb.countEntityStorageInstances(u.Tag(), storageName)
  2198  		if err != nil {
  2199  			return nil, nil, errors.Trace(err)
  2200  		}
  2201  		ops = append(ops, currentCountOp)
  2202  		if currentCount >= countMin {
  2203  			return nil, ops, nil
  2204  		}
  2205  		cons.Count = uint64(countMin)
  2206  	}
  2207  
  2208  	if err := validateStorageConstraintsAgainstCharm(sb,
  2209  		map[string]StorageConstraints{storageName: consTotal},
  2210  		charmMeta,
  2211  	); err != nil {
  2212  		return nil, nil, errors.Trace(err)
  2213  	}
  2214  
  2215  	// Create storage db operations
  2216  	storageOps, storageTags, _, err := createStorageOps(
  2217  		sb,
  2218  		u.Tag(),
  2219  		charmMeta,
  2220  		map[string]StorageConstraints{storageName: cons},
  2221  		u.Base().OS,
  2222  		u,
  2223  	)
  2224  	if err != nil {
  2225  		return nil, nil, errors.Trace(err)
  2226  	}
  2227  	// Increment reference counts for the named storage for each
  2228  	// instance we create. We'll use the reference counts to ensure
  2229  	// we don't exceed limits when adding storage, and for
  2230  	// maintaining model integrity during charm upgrades.
  2231  	var allTags []names.StorageTag
  2232  	for name, tags := range storageTags {
  2233  		count := len(tags)
  2234  		incRefOp, err := increfEntityStorageOp(sb.mb, u.Tag(), name, count)
  2235  		if err != nil {
  2236  			return nil, nil, errors.Trace(err)
  2237  		}
  2238  		storageOps = append(storageOps, incRefOp)
  2239  		allTags = append(allTags, tags...)
  2240  	}
  2241  	ops = append(ops, txn.Op{
  2242  		C:      unitsC,
  2243  		Id:     u.doc.DocID,
  2244  		Assert: isAliveDoc,
  2245  		Update: bson.D{{"$inc",
  2246  			bson.D{{"storageattachmentcount", int(cons.Count)}}}},
  2247  	})
  2248  	return allTags, append(ops, storageOps...), nil
  2249  }
  2250  
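        // countEntityStorageInstances returns the current number of storage
        // instances with the given storage name owned by the given entity,
        // along with a txn.Op on the corresponding refcount document.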
  2251  func (sb *storageBackend) countEntityStorageInstances(owner names.Tag, name string) (txn.Op, int, error) {
  2252  	refcounts, closer := sb.mb.db().GetCollection(refcountsC)
  2253  	defer closer()
  2254  	key := entityStorageRefcountKey(owner, name)
  2255  	return nsRefcounts.CurrentOp(refcounts, key)
  2256  }
  2257  
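        // storageParams holds the volumes and filesystems to be created for a
        // host, along with the volume and filesystem attachments to be made.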
  2258  type storageParams struct {
  2259  	volumes               []HostVolumeParams
  2260  	volumeAttachments     map[names.VolumeTag]VolumeAttachmentParams
  2261  	filesystems           []HostFilesystemParams
  2262  	filesystemAttachments map[names.FilesystemTag]FilesystemAttachmentParams
  2263  }
  2264  
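        // combineStorageParams merges two sets of storage parameters into a
        // new storageParams value, concatenating the volume and filesystem
        // params and merging the attachment maps; entries from rhs overwrite
        // lhs entries with the same tag.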
  2265  func combineStorageParams(lhs, rhs *storageParams) *storageParams {
  2266  	out := &storageParams{}
  2267  	out.volumes = append(lhs.volumes[:], rhs.volumes...)
  2268  	out.filesystems = append(lhs.filesystems[:], rhs.filesystems...)
  2269  	if lhs.volumeAttachments != nil || rhs.volumeAttachments != nil {
  2270  		out.volumeAttachments = make(map[names.VolumeTag]VolumeAttachmentParams)
  2271  		for k, v := range lhs.volumeAttachments {
  2272  			out.volumeAttachments[k] = v
  2273  		}
  2274  		for k, v := range rhs.volumeAttachments {
  2275  			out.volumeAttachments[k] = v
  2276  		}
  2277  	}
  2278  	if lhs.filesystemAttachments != nil || rhs.filesystemAttachments != nil {
  2279  		out.filesystemAttachments = make(map[names.FilesystemTag]FilesystemAttachmentParams)
  2280  		for k, v := range lhs.filesystemAttachments {
  2281  			out.filesystemAttachments[k] = v
  2282  		}
  2283  		for k, v := range rhs.filesystemAttachments {
  2284  			out.filesystemAttachments[k] = v
  2285  		}
  2286  	}
  2287  	return out
  2288  }
  2289  
  2290  // hostStorageOps creates txn.Ops for creating volumes, filesystems,
  2291  // and attachments to the specified host. The results are the txn.Ops,
  2292  // and the volume and filesystem attachment templates for the host.
  2293  func (sb *storageBackend) hostStorageOps(
  2294  	hostId string, args *storageParams,
  2295  ) ([]txn.Op, []volumeAttachmentTemplate, []filesystemAttachmentTemplate, error) {
  2296  	var filesystemOps, volumeOps []txn.Op
  2297  	var fsAttachments []filesystemAttachmentTemplate
  2298  	var volumeAttachments []volumeAttachmentTemplate
  2299  
  2300  	const (
  2301  		createAndAttach = false
  2302  		attachOnly      = true
  2303  	)
  2304  
  2305  	// Create filesystems and filesystem attachments.
  2306  	for _, f := range args.filesystems {
  2307  		ops, filesystemTag, volumeTag, err := sb.addFilesystemOps(f.Filesystem, hostId)
  2308  		if err != nil {
  2309  			return nil, nil, nil, errors.Trace(err)
  2310  		}
  2311  		filesystemOps = append(filesystemOps, ops...)
  2312  		fsAttachments = append(fsAttachments, filesystemAttachmentTemplate{
  2313  			filesystemTag, f.Filesystem.storage, f.Attachment, createAndAttach,
  2314  		})
  2315  		if volumeTag != (names.VolumeTag{}) {
  2316  			// The filesystem requires a volume, so create a volume attachment too.
  2317  			volumeAttachments = append(volumeAttachments, volumeAttachmentTemplate{
  2318  				volumeTag, VolumeAttachmentParams{}, createAndAttach,
  2319  			})
  2320  		}
  2321  	}
  2322  	for tag, filesystemAttachment := range args.filesystemAttachments {
  2323  		fsAttachments = append(fsAttachments, filesystemAttachmentTemplate{
  2324  			tag, names.StorageTag{}, filesystemAttachment, attachOnly,
  2325  		})
  2326  	}
  2327  
  2328  	// Create volumes and volume attachments.
  2329  	for _, v := range args.volumes {
  2330  		ops, tag, err := sb.addVolumeOps(v.Volume, hostId)
  2331  		if err != nil {
  2332  			return nil, nil, nil, errors.Trace(err)
  2333  		}
  2334  		volumeOps = append(volumeOps, ops...)
  2335  		volumeAttachments = append(volumeAttachments, volumeAttachmentTemplate{
  2336  			tag, v.Attachment, createAndAttach,
  2337  		})
  2338  	}
  2339  	for tag, volumeAttachment := range args.volumeAttachments {
  2340  		volumeAttachments = append(volumeAttachments, volumeAttachmentTemplate{
  2341  			tag, volumeAttachment, attachOnly,
  2342  		})
  2343  	}
  2344  
  2345  	ops := make([]txn.Op, 0, len(filesystemOps)+len(volumeOps)+len(fsAttachments)+len(volumeAttachments))
  2346  	if len(fsAttachments) > 0 {
  2347  		attachmentOps := createMachineFilesystemAttachmentsOps(hostId, fsAttachments)
  2348  		ops = append(ops, filesystemOps...)
  2349  		ops = append(ops, attachmentOps...)
  2350  	}
  2351  	if len(volumeAttachments) > 0 {
  2352  		attachmentOps := createMachineVolumeAttachmentsOps(hostId, volumeAttachments)
  2353  		ops = append(ops, volumeOps...)
  2354  		ops = append(ops, attachmentOps...)
  2355  	}
  2356  	return ops, volumeAttachments, fsAttachments, nil
  2357  }
  2358  
  2359  // addMachineStorageAttachmentsOps returns txn.Ops for adding the IDs of
  2360  // attached volumes and filesystems to an existing machine. Filesystem
  2361  // mount points are checked against existing filesystem attachments for
  2362  // conflicts, with a txn.Op added to prevent concurrent additions as
  2363  // necessary.
  2364  func addMachineStorageAttachmentsOps(
  2365  	machine *Machine,
  2366  	volumes []volumeAttachmentTemplate,
  2367  	filesystems []filesystemAttachmentTemplate,
  2368  ) ([]txn.Op, error) {
  2369  	var addToSet bson.D
  2370  	assert := isAliveDoc
  2371  	if len(volumes) > 0 {
  2372  		volumeIds := make([]string, len(volumes))
  2373  		for i, v := range volumes {
  2374  			volumeIds[i] = v.tag.Id()
  2375  		}
  2376  		addToSet = append(addToSet, bson.DocElem{
  2377  			"volumes", bson.D{{"$each", volumeIds}},
  2378  		})
  2379  	}
  2380  	if len(filesystems) > 0 {
  2381  		filesystemIds := make([]string, len(filesystems))
  2382  		var withLocation []filesystemAttachmentTemplate
  2383  		for i, f := range filesystems {
  2384  			filesystemIds[i] = f.tag.Id()
  2385  			if !f.params.locationAutoGenerated {
  2386  				// If the location was not automatically
  2387  				// generated, we must ensure it does not
  2388  				// conflict with any existing storage.
  2389  				// Generated paths are guaranteed to be
  2390  				// unique.
  2391  				withLocation = append(withLocation, f)
  2392  			}
  2393  		}
  2394  		addToSet = append(addToSet, bson.DocElem{
  2395  			"filesystems", bson.D{{"$each", filesystemIds}},
  2396  		})
  2397  		if len(withLocation) > 0 {
  2398  			if err := validateFilesystemMountPoints(machine, withLocation); err != nil {
  2399  				return nil, errors.Annotate(err, "validating filesystem mount points")
  2400  			}
  2401  			// Make sure no filesystems are added concurrently.
  2402  			assert = append(assert, bson.DocElem{
  2403  				"filesystems", bson.D{{"$not", bson.D{{
  2404  					"$elemMatch", bson.D{{
  2405  						"$nin", machine.doc.Filesystems,
  2406  					}},
  2407  				}}}},
  2408  			})
  2409  		}
  2410  	}
  2411  	var update interface{}
  2412  	if len(addToSet) > 0 {
  2413  		update = bson.D{{"$addToSet", addToSet}}
  2414  	}
  2415  	return []txn.Op{{
  2416  		C:      machinesC,
  2417  		Id:     machine.doc.Id,
  2418  		Assert: assert,
  2419  		Update: update,
  2420  	}}, nil
  2421  }