// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package state

import (
	"fmt"

	"github.com/dustin/go-humanize"
	"github.com/juju/errors"
	jujutxn "github.com/juju/txn"
	"github.com/juju/utils/set"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/names.v2"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
	"gopkg.in/mgo.v2/txn"

	"github.com/juju/juju/environs/config"
	"github.com/juju/juju/storage"
	"github.com/juju/juju/storage/poolmanager"
	"github.com/juju/juju/storage/provider"
)

// StorageInstance represents the state of a unit or application-wide storage
// instance in the model.
type StorageInstance interface {
	Entity

	// StorageTag returns the tag for the storage instance.
	StorageTag() names.StorageTag

	// Kind returns the storage instance kind.
	Kind() StorageKind

	// Owner returns the tag of the application or unit that owns this storage
	// instance.
	Owner() names.Tag

	// StorageName returns the name of the storage, as defined in the charm
	// storage metadata. This does not uniquely identify storage instances,
	// but identifies the group that the instances belong to.
	StorageName() string

	// Life reports whether the storage instance is Alive, Dying or Dead.
	Life() Life
}

// StorageAttachment represents the state of a unit's attachment to a storage
// instance. A non-shared storage instance will have a single attachment for
// the storage instance's owning unit, whereas a shared storage instance will
// have an attachment for each unit of the service owning the storage instance.
type StorageAttachment interface {
	// StorageInstance returns the tag of the corresponding storage
	// instance.
	StorageInstance() names.StorageTag

	// Unit returns the tag of the corresponding unit.
	Unit() names.UnitTag

	// Life reports whether the storage attachment is Alive, Dying or Dead.
	Life() Life
}

// StorageKind defines the type of a store: whether it is a block device
// or a filesystem.
type StorageKind int

const (
	StorageKindUnknown StorageKind = iota
	StorageKindBlock
	StorageKindFilesystem
)

// storageInstance is the mongo-document-backed implementation of
// StorageInstance.
type storageInstance struct {
	st  *State
	doc storageInstanceDoc
}

// String returns a human readable string representing the type.
func (k StorageKind) String() string {
	switch k {
	case StorageKindBlock:
		return "block"
	case StorageKindFilesystem:
		return "filesystem"
	default:
		return "unknown"
	}
}

// parseStorageKind is used by the migration code to go from the
// string representation back to the enum.
func parseStorageKind(value string) StorageKind {
	switch value {
	case "block":
		return StorageKindBlock
	case "filesystem":
		return StorageKindFilesystem
	default:
		return StorageKindUnknown
	}
}

// Tag returns the storage instance's tag, satisfying the Entity interface.
func (s *storageInstance) Tag() names.Tag {
	return s.StorageTag()
}

// StorageTag returns the tag for the storage instance.
func (s *storageInstance) StorageTag() names.StorageTag {
	return names.NewStorageTag(s.doc.Id)
}

// Kind returns the storage instance kind.
func (s *storageInstance) Kind() StorageKind {
	return s.doc.Kind
}

// Owner returns the tag of the application or unit that owns this storage
// instance.
func (s *storageInstance) Owner() names.Tag {
	tag, err := names.ParseTag(s.doc.Owner)
	if err != nil {
		// This should be impossible; we do not expose
		// a means of modifying the owner tag.
		panic(err)
	}
	return tag
}

// StorageName returns the name of the storage, as defined in the charm
// storage metadata.
func (s *storageInstance) StorageName() string {
	return s.doc.StorageName
}

// Life reports whether the storage instance is Alive, Dying or Dead.
func (s *storageInstance) Life() Life {
	return s.doc.Life
}

// entityStorageRefcountKey returns a key for refcounting charm storage
// for a specific entity. Each time a storage instance is created, the
// named store's refcount is incremented; and decremented when removed.
138 func entityStorageRefcountKey(owner names.Tag, storageName string) string { 139 return fmt.Sprintf("storage#%s#%s", owner.String(), storageName) 140 } 141 142 // storageInstanceDoc describes a charm storage instance. 143 type storageInstanceDoc struct { 144 DocID string `bson:"_id"` 145 ModelUUID string `bson:"model-uuid"` 146 147 Id string `bson:"id"` 148 Kind StorageKind `bson:"storagekind"` 149 Life Life `bson:"life"` 150 Owner string `bson:"owner"` 151 StorageName string `bson:"storagename"` 152 AttachmentCount int `bson:"attachmentcount"` 153 } 154 155 type storageAttachment struct { 156 doc storageAttachmentDoc 157 } 158 159 func (s *storageAttachment) StorageInstance() names.StorageTag { 160 return names.NewStorageTag(s.doc.StorageInstance) 161 } 162 163 func (s *storageAttachment) Unit() names.UnitTag { 164 return names.NewUnitTag(s.doc.Unit) 165 } 166 167 func (s *storageAttachment) Life() Life { 168 return s.doc.Life 169 } 170 171 // storageAttachmentDoc describes a unit's attachment to a charm storage 172 // instance. 173 type storageAttachmentDoc struct { 174 DocID string `bson:"_id"` 175 ModelUUID string `bson:"model-uuid"` 176 177 Unit string `bson:"unitid"` 178 StorageInstance string `bson:"storageid"` 179 Life Life `bson:"life"` 180 } 181 182 // newStorageInstanceId returns a unique storage instance name. The name 183 // incorporates the storage name as defined in the charm storage metadata, 184 // and a unique sequence number. 185 func newStorageInstanceId(st *State, store string) (string, error) { 186 seq, err := st.sequence("stores") 187 if err != nil { 188 return "", errors.Trace(err) 189 } 190 return fmt.Sprintf("%s/%v", store, seq), nil 191 } 192 193 func storageAttachmentId(unit string, storageInstanceId string) string { 194 return fmt.Sprintf("%s#%s", unitGlobalKey(unit), storageInstanceId) 195 } 196 197 // StorageInstance returns the StorageInstance with the specified tag. 
198 func (st *State) StorageInstance(tag names.StorageTag) (StorageInstance, error) { 199 s, err := st.storageInstance(tag) 200 return s, err 201 } 202 203 func (st *State) storageInstance(tag names.StorageTag) (*storageInstance, error) { 204 storageInstances, cleanup := st.getCollection(storageInstancesC) 205 defer cleanup() 206 207 s := storageInstance{st: st} 208 err := storageInstances.FindId(tag.Id()).One(&s.doc) 209 if err == mgo.ErrNotFound { 210 return nil, errors.NotFoundf("storage instance %q", tag.Id()) 211 } else if err != nil { 212 return nil, errors.Annotate(err, "cannot get storage instance details") 213 } 214 return &s, nil 215 } 216 217 // AllStorageInstances lists all storage instances currently in state 218 // for this Juju model. 219 func (st *State) AllStorageInstances() (storageInstances []StorageInstance, err error) { 220 storageCollection, closer := st.getCollection(storageInstancesC) 221 defer closer() 222 223 sdocs := []storageInstanceDoc{} 224 err = storageCollection.Find(nil).All(&sdocs) 225 if err != nil { 226 return nil, errors.Annotate(err, "cannot get all storage instances") 227 } 228 for _, doc := range sdocs { 229 storageInstances = append(storageInstances, &storageInstance{st, doc}) 230 } 231 return 232 } 233 234 // DestroyStorageInstance ensures that the storage instance and all its 235 // attachments will be removed at some point; if the storage instance has 236 // no attachments, it will be removed immediately. 
func (st *State) DestroyStorageInstance(tag names.StorageTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot destroy storage %q", tag.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		s, err := st.storageInstance(tag)
		if errors.IsNotFound(err) {
			// Already removed; nothing to do.
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		switch ops, err := st.destroyStorageInstanceOps(s); err {
		case errAlreadyDying:
			return nil, jujutxn.ErrNoOperations
		case nil:
			return ops, nil
		default:
			return nil, errors.Trace(err)
		}
	}
	return st.run(buildTxn)
}

// destroyStorageInstanceOps returns the transaction operations that either
// remove the storage instance immediately (when it has no attachments) or
// mark it Dying and schedule a cleanup of its attachments. Returns
// errAlreadyDying if the instance is already Dying.
func (st *State) destroyStorageInstanceOps(s *storageInstance) ([]txn.Op, error) {
	if s.doc.Life == Dying {
		return nil, errAlreadyDying
	}
	if s.doc.AttachmentCount == 0 {
		// There are no attachments remaining, so we can
		// remove the storage instance immediately.
		hasNoAttachments := bson.D{{"attachmentcount", 0}}
		assert := append(hasNoAttachments, isAliveDoc...)
		return removeStorageInstanceOps(st, s.Owner(), s.StorageTag(), assert)
	}
	// There are still attachments: the storage instance will be removed
	// when the last attachment is removed. We schedule a cleanup to destroy
	// attachments.
	notLastRefs := bson.D{
		{"life", Alive},
		{"attachmentcount", bson.D{{"$gt", 0}}},
	}
	update := bson.D{{"$set", bson.D{{"life", Dying}}}}
	ops := []txn.Op{
		newCleanupOp(cleanupAttachmentsForDyingStorage, s.doc.Id),
		{
			C:      storageInstancesC,
			Id:     s.doc.Id,
			Assert: notLastRefs,
			Update: update,
		},
	}
	return ops, nil
}

// removeStorageInstanceOps removes the storage instance with the given
// tag from state, if the specified assertions hold true.
func removeStorageInstanceOps(
	st *State,
	owner names.Tag,
	tag names.StorageTag,
	assert bson.D,
) ([]txn.Op, error) {

	ops := []txn.Op{{
		C:      storageInstancesC,
		Id:     tag.Id(),
		Assert: assert,
		Remove: true,
	}}

	// machineStorageOp clears the storageid back-reference on a volume
	// or filesystem document, asserting it still points at this instance.
	machineStorageOp := func(c string, id string) txn.Op {
		return txn.Op{
			C:      c,
			Id:     id,
			Assert: bson.D{{"storageid", tag.Id()}},
			Update: bson.D{{"$set", bson.D{{"storageid", ""}}}},
		}
	}

	// If the storage instance has an assigned volume and/or filesystem,
	// unassign them. Any volumes and filesystems bound to the storage
	// will be destroyed.
	volume, err := st.storageInstanceVolume(tag)
	if err == nil {
		ops = append(ops, machineStorageOp(
			volumesC, volume.Tag().Id(),
		))
		if volume.LifeBinding() == tag {
			ops = append(ops, destroyVolumeOps(st, volume)...)
		}
	} else if !errors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}
	filesystem, err := st.storageInstanceFilesystem(tag)
	if err == nil {
		ops = append(ops, machineStorageOp(
			filesystemsC, filesystem.Tag().Id(),
		))
		if filesystem.LifeBinding() == tag {
			ops = append(ops, destroyFilesystemOps(st, filesystem)...)
		}
	} else if !errors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}

	// Decrement the charm storage reference count.
	refcounts, closer := st.getCollection(refcountsC)
	defer closer()
	storageName, err := names.StorageName(tag.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}
	storageRefcountKey := entityStorageRefcountKey(owner, storageName)
	decRefOp, _, err := nsRefcounts.DyingDecRefOp(refcounts, storageRefcountKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, decRefOp)

	return ops, nil
}

// machineAssignable is used by createStorageOps to determine what machine
// storage needs to be created. This is implemented by Unit.
type machineAssignable interface {
	machine() (*Machine, error)
	noAssignedMachineOp() txn.Op
}

// createStorageOps returns txn.Ops for creating storage instances
// and attachments for the newly created unit or service.
//
// The entity tag identifies the entity that owns the storage instance
// either a unit or a service. Shared storage instances are owned by a
// service, and non-shared storage instances are owned by a unit.
//
// The charm metadata corresponds to the charm that the owner (service/unit)
// is or will be running, and is used to extract storage constraints,
// default values, etc.
//
// The supplied storage constraints are constraints for the storage
// instances to be created, keyed on the storage name. These constraints
// will be correlated with the charm storage metadata for validation
// and supplementing.
//
// maybeMachineAssignable may be nil, or an machineAssignable which
// describes the entity's machine assignment. If the entity is assigned
// to a machine, then machine storage will be created.
func createStorageOps(
	st *State,
	entityTag names.Tag,
	charmMeta *charm.Meta,
	cons map[string]StorageConstraints,
	series string,
	maybeMachineAssignable machineAssignable,
) (ops []txn.Op, numStorageAttachments int, err error) {

	// template pairs a store name with its charm metadata and the
	// constraints requested for it.
	type template struct {
		storageName string
		meta        charm.Storage
		cons        StorageConstraints
	}

	createdShared := false
	switch entityTag := entityTag.(type) {
	case names.ApplicationTag:
		createdShared = true
	case names.UnitTag:
	default:
		return nil, -1, errors.Errorf("expected application or unit tag, got %T", entityTag)
	}

	// Create storage instances in order of name, to simplify testing.
	storageNames := set.NewStrings()
	for name := range cons {
		storageNames.Add(name)
	}

	templates := make([]template, 0, len(cons))
	for _, store := range storageNames.SortedValues() {
		cons := cons[store]
		charmStorage, ok := charmMeta.Storage[store]
		if !ok {
			return nil, -1, errors.NotFoundf("charm storage %q", store)
		}
		if cons.Count == 0 {
			continue
		}
		if createdShared != charmStorage.Shared {
			// services only get shared storage instances,
			// units only get non-shared storage instances.
			continue
		}
		templates = append(templates, template{
			storageName: store,
			meta:        charmStorage,
			cons:        cons,
		})
	}

	refcounts, closer := st.getCollection(refcountsC)
	defer closer()

	ops = make([]txn.Op, 0, len(templates)*3)
	for _, t := range templates {
		owner := entityTag.String()
		var kind StorageKind
		switch t.meta.Type {
		case charm.StorageBlock:
			kind = StorageKindBlock
		case charm.StorageFilesystem:
			kind = StorageKindFilesystem
		default:
			return nil, -1, errors.Errorf("unknown storage type %q", t.meta.Type)
		}

		// Increment reference counts for the named storage for each
		// instance we create. We'll use the reference counts to ensure
		// we don't exceed limits when adding storage, and for
		// maintaining model integrity during charm upgrades.
		storageRefcountKey := entityStorageRefcountKey(entityTag, t.storageName)
		incRefOp, err := nsRefcounts.CreateOrIncRefOp(refcounts, storageRefcountKey, int(t.cons.Count))
		if err != nil {
			return nil, -1, errors.Trace(err)
		}
		ops = append(ops, incRefOp)

		for i := uint64(0); i < t.cons.Count; i++ {
			id, err := newStorageInstanceId(st, t.storageName)
			if err != nil {
				return nil, -1, errors.Annotate(err, "cannot generate storage instance name")
			}
			doc := &storageInstanceDoc{
				Id:          id,
				Kind:        kind,
				Owner:       owner,
				StorageName: t.storageName,
			}
			var machineOps []txn.Op
			if unitTag, ok := entityTag.(names.UnitTag); ok {
				// Unit-owned storage is attached to its owning
				// unit immediately.
				doc.AttachmentCount = 1
				storage := names.NewStorageTag(id)
				ops = append(ops, createStorageAttachmentOp(storage, unitTag))
				numStorageAttachments++

				if maybeMachineAssignable != nil {
					var err error
					machineOps, err = unitAssignedMachineStorageOps(
						st, unitTag, charmMeta, cons, series,
						&storageInstance{st, *doc},
						maybeMachineAssignable,
					)
					if err != nil {
						return nil, -1, errors.Annotatef(
							err, "creating machine storage for storage %s", id,
						)
					}
				}
			}
			ops = append(ops, txn.Op{
				C:      storageInstancesC,
				Id:     id,
				Assert: txn.DocMissing,
				Insert: doc,
			})
			ops = append(ops, machineOps...)
		}
	}

	// TODO(axw) create storage attachments for each shared storage
	// instance owned by the service.
	//
	// TODO(axw) prevent creation of shared storage after service
	// creation, because the only sane time to add storage attachments
	// is when units are added to said service.

	return ops, numStorageAttachments, nil
}

// unitAssignedMachineStorageOps returns ops for creating volumes, filesystems
// and their attachments to the machine that the specified unit is assigned to,
// corresponding to the specified storage instance.
//
// If the unit is not assigned to a machine, then ops will be returned to assert
// this, and no error will be returned.
func unitAssignedMachineStorageOps(
	st *State,
	unitTag names.UnitTag,
	charmMeta *charm.Meta,
	cons map[string]StorageConstraints,
	series string,
	storage StorageInstance,
	machineAssignable machineAssignable,
) (ops []txn.Op, err error) {
	storageParams, err := machineStorageParamsForStorageInstance(
		st, charmMeta, unitTag, series, cons, storage,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}

	m, err := machineAssignable.machine()
	if err != nil {
		if errors.IsNotAssigned(err) {
			// The unit is not assigned to a machine; return
			// txn.Op that ensures that this remains the case
			// until the transaction is committed.
			return []txn.Op{machineAssignable.noAssignedMachineOp()}, nil
		}
		return nil, errors.Trace(err)
	}

	if err := validateDynamicMachineStorageParams(m, storageParams); err != nil {
		return nil, errors.Trace(err)
	}
	storageOps, volumeAttachments, filesystemAttachments, err := st.machineStorageOps(
		&m.doc, storageParams,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	attachmentOps, err := addMachineStorageAttachmentsOps(
		m, volumeAttachments, filesystemAttachments,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	storageOps = append(storageOps, attachmentOps...)
	return storageOps, nil
}

// createStorageAttachmentOp returns a txn.Op for creating a storage attachment.
// The caller is responsible for updating the attachmentcount field of the storage
// instance.
func createStorageAttachmentOp(storage names.StorageTag, unit names.UnitTag) txn.Op {
	return txn.Op{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(unit.Id(), storage.Id()),
		Assert: txn.DocMissing,
		Insert: &storageAttachmentDoc{
			Unit:            unit.Id(),
			StorageInstance: storage.Id(),
		},
	}
}

// StorageAttachments returns the StorageAttachments for the specified storage
// instance.
func (st *State) StorageAttachments(storage names.StorageTag) ([]StorageAttachment, error) {
	query := bson.D{{"storageid", storage.Id()}}
	attachments, err := st.storageAttachments(query)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage attachments for storage %s", storage.Id())
	}
	return attachments, nil
}

// UnitStorageAttachments returns the StorageAttachments for the specified unit.
func (st *State) UnitStorageAttachments(unit names.UnitTag) ([]StorageAttachment, error) {
	query := bson.D{{"unitid", unit.Id()}}
	attachments, err := st.storageAttachments(query)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage attachments for unit %s", unit.Id())
	}
	return attachments, nil
}

// storageAttachments returns the storage attachments matching the given
// mongo query.
func (st *State) storageAttachments(query bson.D) ([]StorageAttachment, error) {
	coll, closer := st.getCollection(storageAttachmentsC)
	defer closer()

	var docs []storageAttachmentDoc
	if err := coll.Find(query).All(&docs); err != nil {
		return nil, err
	}
	storageAttachments := make([]StorageAttachment, len(docs))
	for i, doc := range docs {
		storageAttachments[i] = &storageAttachment{doc}
	}
	return storageAttachments, nil
}

// StorageAttachment returns the StorageAttachment with the specified tags.
func (st *State) StorageAttachment(storage names.StorageTag, unit names.UnitTag) (StorageAttachment, error) {
	att, err := st.storageAttachment(storage, unit)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return att, nil
}

// storageAttachment returns the attachment document for the given storage
// instance and unit, or a NotFound error if no such document exists.
func (st *State) storageAttachment(storage names.StorageTag, unit names.UnitTag) (*storageAttachment, error) {
	coll, closer := st.getCollection(storageAttachmentsC)
	defer closer()
	var s storageAttachment
	err := coll.FindId(storageAttachmentId(unit.Id(), storage.Id())).One(&s.doc)
	if err == mgo.ErrNotFound {
		return nil, errors.NotFoundf("storage attachment %s:%s", storage.Id(), unit.Id())
	} else if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage attachment %s:%s", storage.Id(), unit.Id())
	}
	return &s, nil
}

// DestroyUnitStorageAttachments ensures that the existing storage
// attachments of the specified unit are removed at some point.
func (st *State) DestroyUnitStorageAttachments(unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot destroy unit %s storage attachments", unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		attachments, err := st.UnitStorageAttachments(unit)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops := make([]txn.Op, 0, len(attachments))
		for _, attachment := range attachments {
			if attachment.Life() != Alive {
				// Already Dying or Dead; nothing to do.
				continue
			}
			ops = append(ops, destroyStorageAttachmentOps(
				attachment.StorageInstance(), unit,
			)...)
		}
		if len(ops) == 0 {
			return nil, jujutxn.ErrNoOperations
		}
		return ops, nil
	}
	return st.run(buildTxn)
}

// DestroyStorageAttachment ensures that the storage attachment will be
// removed at some point.
func (st *State) DestroyStorageAttachment(storage names.StorageTag, unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot destroy storage attachment %s:%s", storage.Id(), unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		s, err := st.storageAttachment(storage, unit)
		if errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		if s.doc.Life == Dying {
			return nil, jujutxn.ErrNoOperations
		}
		return destroyStorageAttachmentOps(storage, unit), nil
	}
	return st.run(buildTxn)
}

// destroyStorageAttachmentOps returns the operations that move a live
// storage attachment to Dying.
func destroyStorageAttachmentOps(storage names.StorageTag, unit names.UnitTag) []txn.Op {
	ops := []txn.Op{{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(unit.Id(), storage.Id()),
		Assert: isAliveDoc,
		Update: bson.D{{"$set", bson.D{{"life", Dying}}}},
	}}
	return ops
}

// RemoveStorageAttachment removes the storage attachment from state, and may
// remove its storage instance as well, if the storage instance is Dying and
// no other references to it exist. It will fail if the storage attachment is
// not Dying.
func (st *State) RemoveStorageAttachment(storage names.StorageTag, unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot remove storage attachment %s:%s", storage.Id(), unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		s, err := st.storageAttachment(storage, unit)
		if errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		inst, err := st.storageInstance(storage)
		if errors.IsNotFound(err) {
			// This implies that the attachment was removed
			// after the call to st.storageAttachment.
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		ops, err := removeStorageAttachmentOps(st, s, inst)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return ops, nil
	}
	return st.run(buildTxn)
}

// removeStorageAttachmentOps returns the operations that remove a Dying
// storage attachment, decrement the owning unit's attachment count, and
// either decrement the storage instance's attachment count or remove the
// instance outright when this is its last possible attachment.
func removeStorageAttachmentOps(
	st *State,
	s *storageAttachment,
	si *storageInstance,
) ([]txn.Op, error) {
	if s.doc.Life != Dying {
		return nil, errors.New("storage attachment is not dying")
	}
	ops := []txn.Op{{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(s.doc.Unit, s.doc.StorageInstance),
		Assert: bson.D{{"life", Dying}},
		Remove: true,
	}, {
		C:      unitsC,
		Id:     s.doc.Unit,
		Assert: txn.DocExists,
		Update: bson.D{{"$inc", bson.D{{"storageattachmentcount", -1}}}},
	}}
	if si.doc.AttachmentCount == 1 {
		var hasLastRef bson.D
		if si.doc.Life == Dying {
			hasLastRef = bson.D{{"life", Dying}, {"attachmentcount", 1}}
		} else if si.doc.Owner == names.NewUnitTag(s.doc.Unit).String() {
			hasLastRef = bson.D{{"attachmentcount", 1}}
		}
		if len(hasLastRef) > 0 {
			// Either the storage instance is dying, or its owner
			// is a unit; in either case, no more attachments can
			// be added to the instance, so it can be removed.
			siOps, err := removeStorageInstanceOps(
				st, si.Owner(), si.StorageTag(), hasLastRef,
			)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, siOps...)
			return ops, nil
		}
	}
	decrefOp := txn.Op{
		C:      storageInstancesC,
		Id:     si.doc.Id,
		Update: bson.D{{"$inc", bson.D{{"attachmentcount", -1}}}},
	}
	if si.doc.Life == Alive {
		// This may be the last reference, but the storage instance is
		// still alive. The storage instance will be removed when its
		// Destroy method is called, if it has no attachments.
		decrefOp.Assert = bson.D{
			{"life", Alive},
			{"attachmentcount", bson.D{{"$gt", 0}}},
		}
	} else {
		// If it's not the last reference when we checked, we want to
		// allow for concurrent attachment removals but want to ensure
		// that we don't drop to zero without removing the storage
		// instance.
		decrefOp.Assert = bson.D{
			{"life", Dying},
			{"attachmentcount", bson.D{{"$gt", 1}}},
		}
	}
	ops = append(ops, decrefOp)
	return ops, nil
}

// removeStorageInstancesOps returns the transaction operations to remove all
// storage instances owned by the specified entity.
func removeStorageInstancesOps(st *State, owner names.Tag) ([]txn.Op, error) {
	coll, closer := st.getCollection(storageInstancesC)
	defer closer()

	var docs []storageInstanceDoc
	err := coll.Find(bson.D{{"owner", owner.String()}}).Select(bson.D{{"id", true}}).All(&docs)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage instances for %s", owner)
	}
	ops := make([]txn.Op, 0, len(docs))
	for _, doc := range docs {
		tag := names.NewStorageTag(doc.Id)
		storageInstanceOps, err := removeStorageInstanceOps(st, owner, tag, nil)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops = append(ops, storageInstanceOps...)
	}
	return ops, nil
}

// storageConstraintsDoc contains storage constraints for an entity.
type storageConstraintsDoc struct {
	DocID       string                        `bson:"_id"`
	ModelUUID   string                        `bson:"model-uuid"`
	Constraints map[string]StorageConstraints `bson:"constraints"`
}

// StorageConstraints contains the user-specified constraints for provisioning
// storage instances for a service unit.
type StorageConstraints struct {
	// Pool is the name of the storage pool from which to provision the
	// storage instances.
	Pool string `bson:"pool"`

	// Size is the required size of the storage instances, in MiB.
	Size uint64 `bson:"size"`

	// Count is the required number of storage instances.
	Count uint64 `bson:"count"`
}

// createStorageConstraintsOp returns an operation that inserts a storage
// constraints document for the given key, asserting it does not yet exist.
func createStorageConstraintsOp(key string, cons map[string]StorageConstraints) txn.Op {
	return txn.Op{
		C:      storageConstraintsC,
		Id:     key,
		Assert: txn.DocMissing,
		Insert: &storageConstraintsDoc{
			Constraints: cons,
		},
	}
}

// replaceStorageConstraintsOp returns an operation that overwrites the
// existing storage constraints for the given key.
func replaceStorageConstraintsOp(key string, cons map[string]StorageConstraints) txn.Op {
	return txn.Op{
		C:      storageConstraintsC,
		Id:     key,
		Assert: txn.DocExists,
		Update: bson.D{{"$set", bson.D{{"constraints", cons}}}},
	}
}

// removeStorageConstraintsOp returns an operation that removes the storage
// constraints document for the given key.
func removeStorageConstraintsOp(key string) txn.Op {
	return txn.Op{
		C:      storageConstraintsC,
		Id:     key,
		Remove: true,
	}
}

// readStorageConstraints reads the storage constraints for the given key,
// returning a NotFound error if no document exists.
func readStorageConstraints(st *State, key string) (map[string]StorageConstraints, error) {
	coll, closer := st.getCollection(storageConstraintsC)
	defer closer()

	var doc storageConstraintsDoc
	err := coll.FindId(key).One(&doc)
	if err == mgo.ErrNotFound {
		return nil, errors.NotFoundf("storage constraints for %q", key)
	}
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage constraints for %q", key)
	}
	return doc.Constraints, nil
}

// storageKind maps a charm storage type to the corresponding storage kind,
// defaulting to StorageKindUnknown for unrecognised types.
func storageKind(storageType charm.StorageType) storage.StorageKind {
	kind := storage.StorageKindUnknown
	switch storageType {
	case charm.StorageBlock:
		kind = storage.StorageKindBlock
	case charm.StorageFilesystem:
		kind = storage.StorageKindFilesystem
	}
	return kind
}

// validateStorageConstraints validates the given constraints against the
// charm's storage metadata, and ensures that every store the charm requires
// has constraints specified.
func validateStorageConstraints(st *State, allCons map[string]StorageConstraints, charmMeta *charm.Meta) error {
	err := validateStorageConstraintsAgainstCharm(st, allCons, charmMeta)
	if err != nil {
		return errors.Trace(err)
	}
	// Ensure all stores have constraints specified. Defaults should have
	// been set by this point, if the user didn't specify constraints.
	for name, charmStorage := range charmMeta.Storage {
		if _, ok := allCons[name]; !ok && charmStorage.CountMin > 0 {
			return errors.Errorf("no constraints specified for store %q", name)
		}
	}
	return nil
}

// validateStorageConstraintsAgainstCharm validates each named set of
// constraints against the corresponding charm storage metadata: count
// bounds, minimum size, and pool/kind compatibility.
func validateStorageConstraintsAgainstCharm(
	st *State,
	allCons map[string]StorageConstraints,
	charmMeta *charm.Meta,
) error {
	for name, cons := range allCons {
		charmStorage, ok := charmMeta.Storage[name]
		if !ok {
			return errors.Errorf("charm %q has no store called %q", charmMeta.Name, name)
		}
		if charmStorage.Shared {
			// TODO(axw) implement shared storage support.
			return errors.Errorf(
				"charm %q store %q: shared storage support not implemented",
				charmMeta.Name, name,
			)
		}
		if cons.Count < uint64(charmStorage.CountMin) {
			return errors.Errorf(
				"charm %q store %q: %d instances required, %d specified",
				charmMeta.Name, name, charmStorage.CountMin, cons.Count,
			)
		}
		if charmStorage.CountMax >= 0 && cons.Count > uint64(charmStorage.CountMax) {
			return errors.Errorf(
				"charm %q store %q: at most %d instances supported, %d specified",
				charmMeta.Name, name, charmStorage.CountMax, cons.Count,
			)
		}
		if charmStorage.MinimumSize > 0 && cons.Size < charmStorage.MinimumSize {
			return errors.Errorf(
				"charm %q store %q: minimum storage size is %s, %s specified",
				charmMeta.Name, name,
				humanize.Bytes(charmStorage.MinimumSize*humanize.MByte),
				humanize.Bytes(cons.Size*humanize.MByte),
			)
		}
		kind := storageKind(charmStorage.Type)
		if err := validateStoragePool(st, cons.Pool, kind, nil); err != nil {
			return err
		}
	}
	return nil
}

// validateStoragePool validates the storage pool for the model.
949 // If machineId is non-nil, the storage scope will be validated against 950 // the machineId; if the storage is not machine-scoped, then the machineId 951 // will be updated to "". 952 func validateStoragePool( 953 st *State, poolName string, kind storage.StorageKind, machineId *string, 954 ) error { 955 if poolName == "" { 956 return errors.New("pool name is required") 957 } 958 providerType, provider, err := poolStorageProvider(st, poolName) 959 if err != nil { 960 return errors.Trace(err) 961 } 962 963 // Ensure the storage provider supports the specified kind. 964 kindSupported := provider.Supports(kind) 965 if !kindSupported && kind == storage.StorageKindFilesystem { 966 // Filesystems can be created if either filesystem 967 // or block storage are supported. 968 if provider.Supports(storage.StorageKindBlock) { 969 kindSupported = true 970 // The filesystem is to be backed by a volume, 971 // so the filesystem must be managed on the 972 // machine. Skip the scope-check below by 973 // setting the pointer to nil. 974 machineId = nil 975 } 976 } 977 if !kindSupported { 978 return errors.Errorf("%q provider does not support %q storage", providerType, kind) 979 } 980 981 // Check the storage scope. 982 if machineId != nil { 983 switch provider.Scope() { 984 case storage.ScopeMachine: 985 if *machineId == "" { 986 return errors.Annotate(err, "machine unspecified for machine-scoped storage") 987 } 988 default: 989 // The storage is not machine-scoped, so we clear out 990 // the machine ID to inform the caller that the storage 991 // scope should be the model. 
992 *machineId = "" 993 } 994 } 995 996 return nil 997 } 998 999 func poolStorageProvider(st *State, poolName string) (storage.ProviderType, storage.Provider, error) { 1000 registry, err := st.storageProviderRegistry() 1001 if err != nil { 1002 return "", nil, errors.Annotate(err, "getting storage provider registry") 1003 } 1004 poolManager := poolmanager.New(NewStateSettings(st), registry) 1005 pool, err := poolManager.Get(poolName) 1006 if errors.IsNotFound(err) { 1007 // If there's no pool called poolName, maybe a provider type 1008 // has been specified directly. 1009 providerType := storage.ProviderType(poolName) 1010 provider, err1 := registry.StorageProvider(providerType) 1011 if err1 != nil { 1012 // The name can't be resolved as a storage provider type, 1013 // so return the original "pool not found" error. 1014 return "", nil, errors.Trace(err) 1015 } 1016 return providerType, provider, nil 1017 } else if err != nil { 1018 return "", nil, errors.Trace(err) 1019 } 1020 providerType := pool.Provider() 1021 provider, err := registry.StorageProvider(providerType) 1022 if err != nil { 1023 return "", nil, errors.Trace(err) 1024 } 1025 return providerType, provider, nil 1026 } 1027 1028 // ErrNoDefaultStoragePool is returned when a storage pool is required but none 1029 // is specified nor available as a default. 1030 var ErrNoDefaultStoragePool = fmt.Errorf("no storage pool specifed and no default available") 1031 1032 // addDefaultStorageConstraints fills in default constraint values, replacing any empty/missing values 1033 // in the specified constraints. 
1034 func addDefaultStorageConstraints(st *State, allCons map[string]StorageConstraints, charmMeta *charm.Meta) error { 1035 conf, err := st.ModelConfig() 1036 if err != nil { 1037 return errors.Trace(err) 1038 } 1039 1040 for name, charmStorage := range charmMeta.Storage { 1041 cons, ok := allCons[name] 1042 if !ok { 1043 if charmStorage.Shared { 1044 // TODO(axw) get the model's default shared storage 1045 // pool, and create constraints here. 1046 return errors.Errorf( 1047 "no constraints specified for shared charm storage %q", 1048 name, 1049 ) 1050 } 1051 } 1052 cons, err := storageConstraintsWithDefaults(conf, charmStorage, name, cons) 1053 if err != nil { 1054 return errors.Trace(err) 1055 } 1056 // Replace in case pool or size were updated. 1057 allCons[name] = cons 1058 } 1059 return nil 1060 } 1061 1062 // storageConstraintsWithDefaults returns a constraints 1063 // derived from cons, with any defaults filled in. 1064 func storageConstraintsWithDefaults( 1065 cfg *config.Config, 1066 charmStorage charm.Storage, 1067 name string, 1068 cons StorageConstraints, 1069 ) (StorageConstraints, error) { 1070 withDefaults := cons 1071 1072 // If no pool is specified, determine the pool from the env config and other constraints. 1073 if cons.Pool == "" { 1074 kind := storageKind(charmStorage.Type) 1075 poolName, err := defaultStoragePool(cfg, kind, cons) 1076 if err != nil { 1077 return withDefaults, errors.Annotatef(err, "finding default pool for %q storage", name) 1078 } 1079 withDefaults.Pool = poolName 1080 } 1081 1082 // If no size is specified, we default to the min size specified by the 1083 // charm, or 1GiB. 
1084 if cons.Size == 0 { 1085 if charmStorage.MinimumSize > 0 { 1086 withDefaults.Size = charmStorage.MinimumSize 1087 } else { 1088 withDefaults.Size = 1024 1089 } 1090 } 1091 if cons.Count == 0 { 1092 withDefaults.Count = uint64(charmStorage.CountMin) 1093 } 1094 return withDefaults, nil 1095 } 1096 1097 // defaultStoragePool returns the default storage pool for the model. 1098 // The default pool is either user specified, or one that is registered by the provider itself. 1099 func defaultStoragePool(cfg *config.Config, kind storage.StorageKind, cons StorageConstraints) (string, error) { 1100 switch kind { 1101 case storage.StorageKindBlock: 1102 loopPool := string(provider.LoopProviderType) 1103 1104 emptyConstraints := StorageConstraints{} 1105 if cons == emptyConstraints { 1106 // No constraints at all: use loop. 1107 return loopPool, nil 1108 } 1109 // Either size or count specified, use env default. 1110 defaultPool, ok := cfg.StorageDefaultBlockSource() 1111 if !ok { 1112 defaultPool = loopPool 1113 } 1114 return defaultPool, nil 1115 1116 case storage.StorageKindFilesystem: 1117 rootfsPool := string(provider.RootfsProviderType) 1118 emptyConstraints := StorageConstraints{} 1119 if cons == emptyConstraints { 1120 return rootfsPool, nil 1121 } 1122 1123 // TODO(axw) add env configuration for default 1124 // filesystem source, prefer that. 1125 defaultPool, ok := cfg.StorageDefaultBlockSource() 1126 if !ok { 1127 defaultPool = rootfsPool 1128 } 1129 return defaultPool, nil 1130 } 1131 return "", ErrNoDefaultStoragePool 1132 } 1133 1134 // AddStorageForUnit adds storage instances to given unit as specified. 1135 // 1136 // Missing storage constraints are populated based on model defaults. 1137 // Storage store name is used to retrieve existing storage instances 1138 // for this store. Combination of existing storage instances and 1139 // anticipated additional storage instances is validated against the 1140 // store as specified in the charm. 
func (st *State) AddStorageForUnit(
	tag names.UnitTag, name string, cons StorageConstraints,
) error {
	u, err := st.Unit(tag.Id())
	if err != nil {
		return errors.Trace(err)
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			// Refresh the unit on retry so the ops are built
			// against up-to-date state.
			if err := u.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
		}
		return st.addStorageForUnitOps(u, name, cons)
	}
	if err := st.run(buildTxn); err != nil {
		return errors.Annotatef(err, "adding storage to unit %s", u)
	}
	return nil
}

// addStorageForUnitOps returns the transaction operations that add storage
// instances for the named store to the given unit. The unit must be Alive.
// Missing constraint fields are filled in from model defaults before the
// ops are built.
func (st *State) addStorageForUnitOps(
	u *Unit,
	storageName string,
	cons StorageConstraints,
) ([]txn.Op, error) {
	if u.Life() != Alive {
		return nil, unitNotAliveErr
	}

	// Storage addition is based on the charm metadata, so make sure that
	// the charm URL for the unit or application does not change during
	// the transaction. If the unit does not have a charm URL set yet,
	// then we use the application's charm URL.
	ops := []txn.Op{{
		C:      unitsC,
		Id:     u.doc.Name,
		Assert: bson.D{{"charmurl", u.doc.CharmURL}},
	}}
	curl, ok := u.CharmURL()
	if !ok {
		a, err := u.Application()
		if err != nil {
			return nil, errors.Annotatef(err, "getting application for unit %v", u.doc.Name)
		}
		curl = a.doc.CharmURL
		// Assert the application's charm URL is unchanged too, since
		// that is the URL we are basing the storage metadata on.
		ops = append(ops, txn.Op{
			C:      applicationsC,
			Id:     a.doc.Name,
			Assert: bson.D{{"charmurl", curl}},
		})
	}
	ch, err := st.Charm(curl)
	if err != nil {
		return nil, errors.Trace(err)
	}
	charmMeta := ch.Meta()
	charmStorageMeta, ok := charmMeta.Storage[storageName]
	if !ok {
		return nil, errors.NotFoundf("charm storage %q", storageName)
	}

	// Populate missing configuration parameters with default values.
	modelConfig, err := st.ModelConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	completeCons, err := storageConstraintsWithDefaults(
		modelConfig,
		charmStorageMeta,
		storageName,
		cons,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// This can happen for charm stores that specify instances range from 0,
	// and no count was specified at deploy as storage constraints for this store,
	// and no count was specified to storage add as a constraint either.
	if cons.Count == 0 {
		return nil, errors.NotValidf("adding storage where instance count is 0")
	}

	// countMin of -1 means "create exactly completeCons.Count instances"
	// rather than topping up to a minimum.
	addUnitStorageOps, err := st.addUnitStorageOps(charmMeta, u, storageName, completeCons, -1)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, addUnitStorageOps...)
	return ops, nil
}

// addUnitStorageOps returns transaction ops to create storage for the given
// unit. If countMin is non-negative, the Count field of the constraints will
// be ignored, and as many storage instances as necessary to make up the
// shortfall will be created.
func (st *State) addUnitStorageOps(
	charmMeta *charm.Meta,
	u *Unit,
	storageName string,
	cons StorageConstraints,
	countMin int,
) ([]txn.Op, error) {
	// Snapshot how many instances of this store the unit currently has,
	// via the refcount collection; currentCountOp presumably asserts the
	// count is unchanged when the transaction runs (see
	// nsRefcounts.CurrentOp — confirm against its definition).
	currentCountOp, currentCount, err := st.countEntityStorageInstances(u.Tag(), storageName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops := []txn.Op{currentCountOp}
	if countMin >= 0 {
		// Top-up mode: create only enough instances to reach countMin;
		// nothing to do if we already have at least that many.
		if currentCount >= countMin {
			return ops, nil
		}
		cons.Count = uint64(countMin - currentCount)
	}

	// Validate the combined (existing + to-be-added) instance count
	// against the charm's storage metadata limits.
	consTotal := cons
	consTotal.Count += uint64(currentCount)
	if err := validateStorageConstraintsAgainstCharm(st,
		map[string]StorageConstraints{storageName: consTotal},
		charmMeta,
	); err != nil {
		return nil, errors.Trace(err)
	}

	// Create storage db operations
	storageOps, _, err := createStorageOps(
		st,
		u.Tag(),
		charmMeta,
		map[string]StorageConstraints{storageName: cons},
		u.Series(),
		u,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Bump the unit's storage attachment count by the number of new
	// instances, asserting the unit is still alive.
	ops = append(ops, txn.Op{
		C:      unitsC,
		Id:     u.doc.DocID,
		Assert: isAliveDoc,
		Update: bson.D{{"$inc",
			bson.D{{"storageattachmentcount", int(cons.Count)}}}},
	})
	return append(ops, storageOps...), nil
}

// countEntityStorageInstances returns the current refcount for storage with
// the given name owned by the given entity, together with a txn.Op for that
// refcount document (from nsRefcounts.CurrentOp).
func (st *State) countEntityStorageInstances(owner names.Tag, name string) (txn.Op, int, error) {
	refcounts, closer := st.getCollection(refcountsC)
	defer closer()
	key := entityStorageRefcountKey(owner, name)
	return nsRefcounts.CurrentOp(refcounts, key)
}