github.com/juju/juju@v0.0.0-20240430160146-1752b71fcf00/state/application.go (about) 1 // Copyright 2012, 2013 Canonical Ltd. 2 // Licensed under the AGPLv3, see LICENCE file for details. 3 4 package state 5 6 import ( 7 stderrors "errors" 8 "fmt" 9 "net" 10 "sort" 11 "strconv" 12 "strings" 13 14 "github.com/juju/charm/v12" 15 "github.com/juju/collections/set" 16 "github.com/juju/errors" 17 "github.com/juju/mgo/v3" 18 "github.com/juju/mgo/v3/bson" 19 "github.com/juju/mgo/v3/txn" 20 "github.com/juju/names/v5" 21 "github.com/juju/schema" 22 jujutxn "github.com/juju/txn/v3" 23 "github.com/juju/utils/v3" 24 "github.com/juju/version/v2" 25 "gopkg.in/juju/environschema.v1" 26 27 "github.com/juju/juju/core/arch" 28 corebase "github.com/juju/juju/core/base" 29 corecharm "github.com/juju/juju/core/charm" 30 "github.com/juju/juju/core/config" 31 "github.com/juju/juju/core/constraints" 32 "github.com/juju/juju/core/leadership" 33 "github.com/juju/juju/core/model" 34 "github.com/juju/juju/core/network" 35 "github.com/juju/juju/core/network/firewall" 36 "github.com/juju/juju/core/status" 37 mgoutils "github.com/juju/juju/mongo/utils" 38 stateerrors "github.com/juju/juju/state/errors" 39 "github.com/juju/juju/tools" 40 ) 41 42 // ExposedEndpoint encapsulates the expose-related details of a particular 43 // application endpoint with respect to the sources (CIDRs or space IDs) that 44 // should be able to access the ports opened by the application charm for an 45 // endpoint. 46 type ExposedEndpoint struct { 47 // A list of spaces that should be able to reach the opened ports 48 // for an exposed application's endpoint. 49 ExposeToSpaceIDs []string `bson:"to-space-ids,omitempty"` 50 51 // A list of CIDRs that should be able to reach the opened ports 52 // for an exposed application's endpoint. 53 ExposeToCIDRs []string `bson:"to-cidrs,omitempty"` 54 } 55 56 // AllowTrafficFromAnyNetwork returns true if the exposed endpoint parameters 57 // include the 0.0.0.0/0 CIDR. 
func (exp ExposedEndpoint) AllowTrafficFromAnyNetwork() bool {
	// A single match on either the IPv4 or IPv6 all-networks CIDR is enough.
	for _, cidr := range exp.ExposeToCIDRs {
		if cidr == firewall.AllNetworksIPV4CIDR || cidr == firewall.AllNetworksIPV6CIDR {
			return true
		}
	}

	return false
}

// Application represents the state of an application.
type Application struct {
	st  *State
	doc applicationDoc
}

// applicationDoc represents the internal state of an application in MongoDB.
// Note the correspondence with ApplicationInfo in apiserver.
type applicationDoc struct {
	DocID       string `bson:"_id"`
	Name        string `bson:"name"`
	ModelUUID   string `bson:"model-uuid"`
	Subordinate bool   `bson:"subordinate"`
	// CharmURL should be moved to CharmOrigin. Attempting it should
	// be relatively straight forward, but very time consuming.
	// When moving to CharmHub from Juju it should be
	// tackled then.
	CharmURL    *string     `bson:"charmurl"`
	CharmOrigin CharmOrigin `bson:"charm-origin"`
	// CharmModifiedVersion changes will trigger the upgrade-charm hook
	// for units independent of charm url changes.
	CharmModifiedVersion int          `bson:"charmmodifiedversion"`
	ForceCharm           bool         `bson:"forcecharm"`
	Life                 Life         `bson:"life"`
	UnitCount            int          `bson:"unitcount"`
	RelationCount        int          `bson:"relationcount"`
	MinUnits             int          `bson:"minunits"`
	Tools                *tools.Tools `bson:",omitempty"`
	TxnRevno             int64        `bson:"txn-revno"`
	MetricCredentials    []byte       `bson:"metric-credentials"`

	// Exposed is set to true when the application is exposed.
	Exposed bool `bson:"exposed"`

	// A map for tracking the per-endpoint expose-related parameters for
	// an exposed app where keys are endpoint names or the "" value which
	// represents all application endpoints.
	ExposedEndpoints map[string]ExposedEndpoint `bson:"exposed-endpoints,omitempty"`

	// CAAS related attributes.
	DesiredScale      int                           `bson:"scale"`
	PasswordHash      string                        `bson:"passwordhash"`
	ProvisioningState *ApplicationProvisioningState `bson:"provisioning-state"`

	// Placement is the placement directive that should be used when allocating units/pods.
	Placement string `bson:"placement,omitempty"`
	// HasResources is set to false after an application has been removed
	// and any k8s cluster resources have been fully cleaned up.
	// Until then, the application must not be removed from the Juju model.
	HasResources bool `bson:"has-resources,omitempty"`
}

// ApplicationProvisioningState is the CAAS application provisioning state for an
// application.
type ApplicationProvisioningState struct {
	Scaling     bool `bson:"scaling"`
	ScaleTarget int  `bson:"scale-target"`
}

// newApplication wraps the given document in an Application bound to st.
func newApplication(st *State, doc *applicationDoc) *Application {
	app := &Application{
		st:  st,
		doc: *doc,
	}
	return app
}

// IsRemote returns false for a local application.
func (a *Application) IsRemote() bool {
	return false
}

// Name returns the application name.
func (a *Application) Name() string {
	return a.doc.Name
}

// Tag returns a name identifying the application.
// The returned name will be different from other Tag values returned by any
// other entities from the same state.
func (a *Application) Tag() names.Tag {
	return a.ApplicationTag()
}

// ApplicationTag returns the more specific ApplicationTag rather than the generic
// Tag.
func (a *Application) ApplicationTag() names.ApplicationTag {
	return names.NewApplicationTag(a.Name())
}

// applicationGlobalKey returns the global database key for the application
// with the given name.
func applicationGlobalKey(appName string) string {
	return "a#" + appName
}

// globalKey returns the global database key for the application.
165 func (a *Application) globalKey() string { 166 return applicationGlobalKey(a.doc.Name) 167 } 168 169 func applicationGlobalOperatorKey(appName string) string { 170 return applicationGlobalKey(appName) + "#operator" 171 } 172 173 func applicationCharmConfigKey(appName string, curl *string) string { 174 return fmt.Sprintf("a#%s#%s", appName, *curl) 175 } 176 177 // charmConfigKey returns the charm-version-specific settings collection 178 // key for the application. 179 func (a *Application) charmConfigKey() string { 180 return applicationCharmConfigKey(a.doc.Name, a.doc.CharmURL) 181 } 182 183 func applicationConfigKey(appName string) string { 184 return fmt.Sprintf("a#%s#application", appName) 185 } 186 187 // applicationConfigKey returns the charm-version-specific settings collection 188 // key for the application. 189 func (a *Application) applicationConfigKey() string { 190 return applicationConfigKey(a.doc.Name) 191 } 192 193 func applicationStorageConstraintsKey(appName string, curl *string) string { 194 return fmt.Sprintf("asc#%s#%s", appName, *curl) 195 } 196 197 // storageConstraintsKey returns the charm-version-specific storage 198 // constraints collection key for the application. 199 func (a *Application) storageConstraintsKey() string { 200 return applicationStorageConstraintsKey(a.doc.Name, a.doc.CharmURL) 201 } 202 203 func applicationDeviceConstraintsKey(appName string, curl *string) string { 204 return fmt.Sprintf("adc#%s#%s", appName, *curl) 205 } 206 207 // deviceConstraintsKey returns the charm-version-specific device 208 // constraints collection key for the application. 209 func (a *Application) deviceConstraintsKey() string { 210 return applicationDeviceConstraintsKey(a.doc.Name, a.doc.CharmURL) 211 } 212 213 // Base returns the specified base for this charm. 
func (a *Application) Base() Base {
	return Base{OS: a.doc.CharmOrigin.Platform.OS, Channel: a.doc.CharmOrigin.Platform.Channel}
}

// Life returns whether the application is Alive, Dying or Dead.
func (a *Application) Life() Life {
	return a.doc.Life
}

// AgentTools returns the tools that the operator is currently running.
// It returns an error that satisfies errors.IsNotFound if the tools have not
// yet been set.
func (a *Application) AgentTools() (*tools.Tools, error) {
	if a.doc.Tools == nil {
		return nil, errors.NotFoundf("operator image metadata for application %q", a)
	}
	// Return a copy so callers cannot mutate the cached document state.
	result := *a.doc.Tools
	return &result, nil
}

// SetAgentVersion sets the Tools value in applicationDoc.
// The update is refused (stateerrors.ErrDead) if the application is Dead.
func (a *Application) SetAgentVersion(v version.Binary) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set agent version for application %q", a)
	if err = checkVersionValidity(v); err != nil {
		return errors.Trace(err)
	}
	versionedTool := &tools.Tools{Version: v}
	ops := []txn.Op{{
		C:      applicationsC,
		Id:     a.doc.DocID,
		Assert: notDeadDoc,
		Update: bson.D{{"$set", bson.D{{"tools", versionedTool}}}},
	}}
	if err := a.st.db().RunTransaction(ops); err != nil {
		// An aborted txn means the not-dead assertion failed.
		return onAbort(err, stateerrors.ErrDead)
	}
	// Keep the in-memory copy in sync with what was just written.
	a.doc.Tools = versionedTool
	return nil
}

// SetProvisioningState sets the provisioning state for the application.
// The transaction asserts on both life and the currently-known provisioning
// state; if either changed concurrently it returns
// stateerrors.ProvisioningStateInconsistent.
func (a *Application) SetProvisioningState(ps ApplicationProvisioningState) error {
	// TODO: Treat dying/dead scale to 0 as a separate call.
	life := a.Life()
	assertions := bson.D{
		{"life", life},
		{"provisioning-state", a.doc.ProvisioningState},
	}
	sets := bson.D{{"provisioning-state", ps}}
	if ps.Scaling {
		switch life {
		case Alive:
			alreadyScaling := false
			if a.doc.ProvisioningState != nil && a.doc.ProvisioningState.Scaling {
				alreadyScaling = true
			}
			if !alreadyScaling && ps.Scaling {
				// if starting a scale, ensure we are scaling to the same target.
				assertions = append(assertions, bson.DocElem{
					"scale", ps.ScaleTarget,
				})
			}
		case Dying, Dead:
			// force scale to the scale target when dying/dead.
			sets = append(sets, bson.DocElem{
				"scale", ps.ScaleTarget,
			})
		}
	}

	ops := []txn.Op{{
		C:      applicationsC,
		Id:     a.doc.DocID,
		Assert: assertions,
		Update: bson.D{{"$set", sets}},
	}}
	if err := a.st.db().RunTransaction(ops); errors.Is(err, txn.ErrAborted) {
		return stateerrors.ProvisioningStateInconsistent
	} else if err != nil {
		return errors.Annotatef(err, "failed to set provisioning-state for application %q", a)
	}
	a.doc.ProvisioningState = &ps
	return nil
}

// ProvisioningState returns the provisioning state for the application.
// A copy is returned; nil means no provisioning state has been set.
func (a *Application) ProvisioningState() *ApplicationProvisioningState {
	if a.doc.ProvisioningState == nil {
		return nil
	}
	ps := *a.doc.ProvisioningState
	return &ps
}

// errRefresh signals that in-memory state is stale; callers should refresh
// the entity and retry the operation.
var errRefresh = stderrors.New("state seems inconsistent, refresh and try again")

// Destroy ensures that the application and all its relations will be removed at
// some point; if the application has no units, and no relation involving the
// application has any units in scope, they are all removed immediately.
func (a *Application) Destroy() (err error) {
	op := a.DestroyOperation()
	defer func() {
		logger.Tracef("Application(%s).Destroy() => %v", a.doc.Name, err)
		if err == nil {
			// After running the destroy ops, app life is either Dying,
			// or it may be set to Dead. If removed, life will also be marked as Dead.
			a.doc.Life = op.PostDestroyAppLife
		}
	}()
	err = a.st.ApplyOperation(op)
	if len(op.Errors) != 0 {
		// Accumulated operational errors are non-fatal (forced destroy);
		// surface them as a warning rather than failing the call.
		logger.Warningf("operational errors destroying application %v: %v", a.Name(), op.Errors)
	}
	return err
}

// DestroyOperation returns a model operation that will destroy the application.
// The operation holds its own copy of the application doc so that retries do
// not mutate this receiver.
func (a *Application) DestroyOperation() *DestroyApplicationOperation {
	return &DestroyApplicationOperation{
		app: &Application{st: a.st, doc: a.doc},
	}
}

// DestroyApplicationOperation is a model operation for destroying an
// application.
type DestroyApplicationOperation struct {
	// app holds the application to destroy.
	app *Application

	// DestroyStorage controls whether or not storage attached
	// to units of the application are destroyed. If this is false,
	// then detachable storage will be detached and left in the model.
	DestroyStorage bool

	// RemoveOffers controls whether or not application offers
	// are removed. If this is false, then the operation will
	// fail if there are any offers remaining.
	RemoveOffers bool

	// CleanupIgnoringResources is true if this operation has been
	// scheduled by a forced cleanup task.
	CleanupIgnoringResources bool

	// Removed is true if the application is removed during destroy.
	Removed bool

	// PostDestroyAppLife is the life of the app if destroy completes without error.
	PostDestroyAppLife Life

	// ForcedOperation stores needed information to force this operation.
	ForcedOperation
}

// Build is part of the ModelOperation interface.
func (op *DestroyApplicationOperation) Build(attempt int) ([]txn.Op, error) {
	if attempt > 0 {
		// A retry means our cached doc may be stale; reload before rebuilding ops.
		if err := op.app.Refresh(); errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, err
		}
	}
	// This call returns needed operations to destroy an application.
	// All operational errors are added to 'op' struct
	// and may be of interest to the user. Without 'force', these errors are considered fatal.
	// If 'force' is specified, they are treated as non-fatal - they will not prevent further
	// processing: we'll still try to remove application.
	ops, err := op.destroyOps()
	switch errors.Cause(err) {
	case errRefresh:
		return nil, jujutxn.ErrTransientFailure
	case errAlreadyDying:
		return nil, jujutxn.ErrNoOperations
	case nil:
		if len(op.Errors) == 0 {
			return ops, nil
		}
		if op.Force {
			logger.Debugf("forcing application removal")
			return ops, nil
		}
		// Should be impossible to reach as--by convention--we return an error and
		// an empty ops slice when a force-able error occurs and we're running !op.Force
		err = errors.Errorf("errors encountered: %q", op.Errors)
	}
	return nil, err
}

// Done is part of the ModelOperation interface. On success it performs
// best-effort post-destroy cleanup (status history, and secrets if the app
// was fully removed); on failure it tries to produce a more useful error
// describing why the destroy could not proceed.
func (op *DestroyApplicationOperation) Done(err error) error {
	if err == nil {
		if err := op.eraseHistory(); err != nil {
			if !op.Force {
				logger.Errorf("cannot delete history for application %q: %v", op.app, err)
			}
			op.AddError(errors.Errorf("force erase application %q history proceeded despite encountering ERROR %v", op.app, err))
		}
		// Only delete secrets after application is removed.
		if !op.Removed {
			return nil
		}
		if err := op.deleteSecrets(); err != nil {
			// Best-effort: secret deletion failure does not fail the destroy.
			logger.Errorf("cannot delete secrets for application %q: %v", op.app, err)
		}
		return nil
	}
	connected, err2 := applicationHasConnectedOffers(op.app.st, op.app.Name())
	if err2 != nil {
		err = errors.Trace(err2)
	} else if connected {
		rels, err2 := op.app.st.AllRelations()
		if err2 != nil {
			err = errors.Trace(err2)
		} else {
			// Count cross-model relations to report how many consumers
			// are blocking the destroy.
			n := 0
			for _, r := range rels {
				if _, isCrossModel, err := r.RemoteApplication(); err == nil && isCrossModel {
					n++
				}
			}
			err = errors.Errorf("application is used by %d consumer%s", n, plural(n))
		}
	} else {
		// NOTE(review): any other destroy failure is reported as the app
		// having changed underneath us — presumably a concurrent update;
		// confirm against callers before relying on this classification.
		err = errors.NewNotSupported(err, "change to the application detected")
	}

	return errors.Annotatef(err, "cannot destroy application %q", op.app)
}

// eraseHistory removes the application's status history. Errors are fatal
// only when the operation is not forced.
func (op *DestroyApplicationOperation) eraseHistory() error {
	var stop <-chan struct{} // stop not used here yet.
	if err := eraseStatusHistory(stop, op.app.st, op.app.globalKey()); err != nil {
		one := errors.Annotate(err, "application")
		if op.FatalError(one) {
			return one
		}
	}
	return nil
}

// deleteSecrets removes secrets owned by the application along with its
// secret consumer records.
func (op *DestroyApplicationOperation) deleteSecrets() error {
	ownedURIs, err := op.app.st.referencedSecrets(op.app.Tag(), "owner-tag")
	if err != nil {
		return errors.Trace(err)
	}
	if _, err := op.app.st.deleteSecrets(ownedURIs); err != nil {
		return errors.Annotatef(err, "deleting owned secrets for %q", op.app.Name())
	}
	// TODO(juju4) - remove
	if err := op.app.st.RemoveSecretConsumer(op.app.Tag()); err != nil {
		return errors.Annotatef(err, "deleting secret consumer records for %q", op.app.Name())
	}
	return nil
}

// destroyOps returns the operations required to destroy the application. If it
// returns errRefresh, the application should be refreshed and the destruction
// operations recalculated.
//
// When this operation has 'force' set, all operational errors are considered non-fatal
// and are accumulated on the operation.
// This method will return all operations we can construct despite errors.
//
// When the 'force' is not set, any operational errors will be considered fatal. All operations
// constructed up until the error will be discarded and the error will be returned.
func (op *DestroyApplicationOperation) destroyOps() ([]txn.Op, error) {
	rels, err := op.app.Relations()
	if op.FatalError(err) {
		return nil, err
	}
	if len(rels) != op.app.doc.RelationCount {
		// This is just an early bail out. The relations obtained may still
		// be wrong, but that situation will be caught by a combination of
		// asserts on relationcount and on each known relation, below.
		logger.Tracef("DestroyApplicationOperation(%s).destroyOps mismatched relation count %d != %d",
			op.app.doc.Name, len(rels), op.app.doc.RelationCount)
		return nil, errRefresh
	}
	var ops []txn.Op
	minUnitsExists, err := doesMinUnitsExist(op.app.st, op.app.Name())
	if err != nil {
		return nil, errors.Trace(err)
	}
	if minUnitsExists {
		ops = []txn.Op{minUnitsRemoveOp(op.app.st, op.app.doc.Name)}
	}
	removeCount := 0
	failedRels := false
	for _, rel := range rels {
		// When forced, this call will return both operations to remove this
		// relation as well as all operational errors encountered.
		// If the 'force' is not set and the call came across some errors,
		// these errors will be fatal and no operations will be returned.
		relOps, isRemove, err := rel.destroyOps(op.app.doc.Name, &op.ForcedOperation)
		if errors.Cause(err) == errAlreadyDying {
			// Already dying: just assert it stays that way.
			relOps = []txn.Op{{
				C:      relationsC,
				Id:     rel.doc.DocID,
				Assert: bson.D{{"life", Dying}},
			}}
		} else if err != nil {
			op.AddError(err)
			failedRels = true
			continue
		}
		if isRemove {
			removeCount++
		}
		ops = append(ops, relOps...)
	}
	op.PostDestroyAppLife = Dying
	if !op.Force && failedRels {
		return nil, op.LastError()
	}
	resOps, err := removeResourcesOps(op.app.st, op.app.doc.Name)
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	ops = append(ops, resOps...)

	removeUnitAssignmentOps, err := op.app.removeUnitAssignmentsOps()
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, removeUnitAssignmentOps...)

	// We can't delete an application if it is being offered,
	// unless those offers have no relations.
	if !op.RemoveOffers {
		countOp, n, err := countApplicationOffersRefOp(op.app.st, op.app.Name())
		if err != nil {
			return nil, errors.Trace(err)
		}
		if n == 0 {
			ops = append(ops, countOp)
		} else {
			connected, err := applicationHasConnectedOffers(op.app.st, op.app.Name())
			if err != nil {
				return nil, errors.Trace(err)
			}
			if connected {
				return nil, errors.Errorf("application is used by %d offer%s", n, plural(n))
			}
			// None of our offers are connected,
			// it's safe to remove them.
			removeOfferOps, err := removeApplicationOffersOps(op.app.st, op.app.Name())
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, removeOfferOps...)
			ops = append(ops, txn.Op{
				C:  applicationsC,
				Id: op.app.doc.DocID,
				Assert: bson.D{
					// We're using the txn-revno here because relationcount is too
					// coarse-grained for what we need. Using the revno will
					// create false positives during concurrent updates of the
					// model, but eliminates the possibility of it entering
					// an inconsistent state.
					{"txn-revno", op.app.doc.TxnRevno},
				},
			})
		}
	}

	branchOps, err := op.unassignBranchOps()
	if err != nil {
		if !op.Force {
			return nil, errors.Trace(err)
		}
		op.AddError(err)
	}
	ops = append(ops, branchOps...)

	// If the application has no units, and all its known relations will be
	// removed, the application can also be removed, so long as there are
	// no other cluster resources, as can be the case for k8s charms.
	if op.app.doc.UnitCount == 0 && op.app.doc.RelationCount == removeCount {
		logger.Tracef("DestroyApplicationOperation(%s).destroyOps removing application", op.app.doc.Name)
		// If we're forcing destruction the assertion shouldn't be that
		// life is alive, but that it's what we think it is now.
		assertion := bson.D{
			{"life", op.app.doc.Life},
			{"unitcount", 0},
			{"relationcount", removeCount},
		}

		// There are resources pending so don't remove app yet.
		if op.app.doc.HasResources && !op.CleanupIgnoringResources {
			if op.Force {
				// We need to wait longer than normal for any k8s resources to be fully removed
				// since it can take a while for the cluster to terminate running pods etc.
				logger.Debugf("scheduling forced application %q cleanup", op.app.doc.Name)
				deadline := op.app.st.stateClock.Now().Add(2 * op.MaxWait)
				cleanupOp := newCleanupAtOp(deadline, cleanupForceApplication, op.app.doc.Name, op.MaxWait)
				ops = append(ops, cleanupOp)
			}
			logger.Debugf("advancing application %q to dead, waiting for cluster resources", op.app.doc.Name)
			update := bson.D{{"$set", bson.D{{"life", Dead}}}}
			if removeCount != 0 {
				decref := bson.D{{"$inc", bson.D{{"relationcount", -removeCount}}}}
				update = append(update, decref...)
			}
			advanceLifecycleOp := txn.Op{
				C:      applicationsC,
				Id:     op.app.doc.DocID,
				Assert: assertion,
				Update: update,
			}
			op.PostDestroyAppLife = Dead
			return append(ops, advanceLifecycleOp), nil
		}

		// When forced, this call will return operations to remove this
		// application and accumulate all operational errors encountered in the operation.
		// If the 'force' is not set and the call came across some errors,
		// these errors will be fatal and no operations will be returned.
		removeOps, err := op.app.removeOps(assertion, &op.ForcedOperation)
		if err != nil {
			if !op.Force || errors.Cause(err) == errRefresh {
				return nil, errors.Trace(err)
			}
			op.AddError(err)
			return ops, nil
		}
		op.Removed = true
		return append(ops, removeOps...), nil
	}
	// In all other cases, application removal will be handled as a consequence
	// of the removal of the last unit or relation referencing it. If any
	// relations have been removed, they'll be caught by the operations
	// collected above; but if any has been added, we need to abort and add
	// a destroy op for that relation too. In combination, it's enough to
	// check for count equality: an add/remove will not touch the count, but
	// will be caught by virtue of being a remove.
	notLastRefs := bson.D{
		{"life", op.app.doc.Life},
		{"relationcount", op.app.doc.RelationCount},
	}
	// With respect to unit count, a changing value doesn't matter, so long
	// as the count's equality with zero does not change, because all we care
	// about is that *some* unit is, or is not, keeping the application from
	// being removed: the difference between 1 unit and 1000 is irrelevant.
	if op.app.doc.UnitCount > 0 {
		logger.Tracef("DestroyApplicationOperation(%s).destroyOps UnitCount == %d, queuing up unitCleanup",
			op.app.doc.Name, op.app.doc.UnitCount)
		cleanupOp := newCleanupOp(
			cleanupUnitsForDyingApplication,
			op.app.doc.Name,
			op.DestroyStorage,
			op.Force,
			op.MaxWait,
		)
		ops = append(ops, cleanupOp)
		notLastRefs = append(notLastRefs, bson.D{{"unitcount", bson.D{{"$gt", 0}}}}...)
	} else {
		notLastRefs = append(notLastRefs, bson.D{{"unitcount", 0}}...)
	}
	update := bson.D{{"$set", bson.D{{"life", Dying}}}}
	if removeCount != 0 {
		decref := bson.D{{"$inc", bson.D{{"relationcount", -removeCount}}}}
		update = append(update, decref...)
	}
	ops = append(ops, txn.Op{
		C:      applicationsC,
		Id:     op.app.doc.DocID,
		Assert: notLastRefs,
		Update: update,
	})
	return ops, nil
}

// unassignBranchOps returns operations that unassign the application from
// every branch it is currently assigned to.
func (op *DestroyApplicationOperation) unassignBranchOps() ([]txn.Op, error) {
	m, err := op.app.st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	appName := op.app.doc.Name
	branches, err := m.applicationBranches(appName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(branches) == 0 {
		return nil, nil
	}
	ops := []txn.Op{}
	for _, b := range branches {
		// assumption: branches from applicationBranches will
		// ALWAYS have the appName in assigned-units, but not
		// always in config.
		ops = append(ops, b.unassignAppOps(appName)...)
	}
	return ops, nil
}

// removeResourcesOps returns operations that remove all resources recorded
// for the given application.
func removeResourcesOps(st *State, applicationID string) ([]txn.Op, error) {
	resources := st.resources()
	ops, err := resources.removeResourcesOps(applicationID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return ops, nil
}

// removeUnitAssignmentsOps returns operations that remove any staged unit
// assignments for this application's units (matched by id pattern).
func (a *Application) removeUnitAssignmentsOps() (ops []txn.Op, err error) {
	pattern := fmt.Sprintf("^%s:%s/[0-9]+$", a.st.ModelUUID(), a.Name())
	unitAssignments, err := a.st.unitAssignments(bson.D{
		{
			Name: "_id", Value: bson.D{
				{Name: "$regex", Value: pattern},
			},
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, unitAssignment := range unitAssignments {
		ops = append(ops, removeStagedAssignmentOp(a.st.docID(unitAssignment.Unit)))
	}
	return ops, nil
}

// removeOps returns the operations required to remove the application. Supplied
// asserts will be included in the operation on the application document.
// When force is set, the operation will proceed regardless of the errors,
// and if any errors are encountered, all possible accumulated operations
// as well as all encountered errors will be returned.
// When 'force' is set, this call will return operations to remove this
// application and will accumulate all operational errors encountered in the operation.
// If the 'force' is not set, any error will be fatal and no operations will be returned.
func (a *Application) removeOps(asserts bson.D, op *ForcedOperation) ([]txn.Op, error) {
	ops := []txn.Op{{
		C:      applicationsC,
		Id:     a.doc.DocID,
		Assert: asserts,
		Remove: true,
	}}

	// Remove application offers.
	removeOfferOps, err := removeApplicationOffersOps(a.st, a.doc.Name)
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	ops = append(ops, removeOfferOps...)
	// Remove secret permissions.
	secretScopedPermissionsOps, err := a.st.removeScopedSecretPermissionOps(a.Tag())
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	ops = append(ops, secretScopedPermissionsOps...)
	secretConsumerPermissionsOps, err := a.st.removeConsumerSecretPermissionOps(a.Tag())
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	ops = append(ops, secretConsumerPermissionsOps...)
	secretLabelOps, err := a.st.removeOwnerSecretLabelsOps(a.ApplicationTag())
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, secretLabelOps...)

	secretLabelOps, err = a.st.removeConsumerSecretLabelsOps(a.ApplicationTag())
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, secretLabelOps...)

	// Note that appCharmDecRefOps might not catch the final decref
	// when run in a transaction that decrefs more than once. So we
	// avoid attempting to do the final cleanup in the ref dec ops and
	// do it explicitly below.
	name := a.doc.Name
	curl := a.doc.CharmURL
	// When 'force' is set, this call will return operations to delete application references
	// to this charm as well as accumulate all operational errors encountered in the operation.
	// If the 'force' is not set, any error will be fatal and no operations will be returned.
	charmOps, err := appCharmDecRefOps(a.st, name, curl, false, op)
	if err != nil {
		if errors.Cause(err) == errRefcountAlreadyZero {
			// We have already removed the reference to the charm, this indicates
			// the application is already removed, reload yourself and try again
			return nil, errRefresh
		}
		if op.FatalError(err) {
			return nil, errors.Trace(err)
		}
	}
	ops = append(ops, charmOps...)

	// By the time we get to here, all units and charm refs have been removed,
	// so it's safe to do this additional cleanup.
	ops = append(ops, finalAppCharmRemoveOps(name, curl)...)

	ops = append(ops, a.removeCloudServiceOps()...)
	globalKey := a.globalKey()
	ops = append(ops,
		removeEndpointBindingsOp(globalKey),
		removeConstraintsOp(globalKey),
		annotationRemoveOp(a.st, globalKey),
		removeLeadershipSettingsOp(name),
		removeStatusOp(a.st, globalKey),
		removeStatusOp(a.st, applicationGlobalOperatorKey(name)),
		removeSettingsOp(settingsC, a.applicationConfigKey()),
		removeModelApplicationRefOp(a.st, name),
		removePodSpecOp(a.ApplicationTag()),
	)

	apr, err := getApplicationPortRanges(a.st, a.Name())
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	ops = append(ops, apr.removeOps()...)

	cancelCleanupOps, err := a.cancelScheduledCleanupOps()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return append(ops, cancelCleanupOps...), nil
}

// cancelScheduledCleanupOps returns operations cancelling any scheduled
// cleanup docs that reference this application, its units, or its relations.
func (a *Application) cancelScheduledCleanupOps() ([]txn.Op, error) {
	appOrUnitPattern := bson.DocElem{
		Name: "prefix", Value: bson.D{
			{Name: "$regex", Value: fmt.Sprintf("^%s(/[0-9]+)*$", a.Name())},
		},
	}
	// No unit and app exists now, so cancel the below scheduled cleanup docs to avoid new resources of later deployment
	// getting removed accidentally because we re-use unit numbers for sidecar applications.
	cancelCleanupOpsArgs := []cancelCleanupOpsArg{
		{cleanupForceDestroyedUnit, appOrUnitPattern},
		{cleanupForceRemoveUnit, appOrUnitPattern},
		{cleanupForceApplication, appOrUnitPattern},
	}
	relations, err := a.Relations()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, rel := range relations {
		cancelCleanupOpsArgs = append(cancelCleanupOpsArgs, cancelCleanupOpsArg{
			cleanupForceDestroyedRelation,
			bson.DocElem{
				Name: "prefix", Value: relationKey(rel.Endpoints())},
		})
	}

	cancelCleanupOps, err := a.st.cancelCleanupOps(cancelCleanupOpsArgs...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return cancelCleanupOps, nil
}

// IsExposed returns whether this application is exposed. The explicitly open
// ports (with open-port) for exposed applications may be accessed from machines
// outside of the local deployment network. See MergeExposeSettings and ClearExposed.
func (a *Application) IsExposed() bool {
	return a.doc.Exposed
}

// ExposedEndpoints returns a map where keys are endpoint names (or the ""
// value which represents all endpoints) and values are ExposedEndpoint
// instances that specify which sources (spaces or CIDRs) can access the
// opened ports for each endpoint once the application is exposed.
func (a *Application) ExposedEndpoints() map[string]ExposedEndpoint {
	if len(a.doc.ExposedEndpoints) == 0 {
		return nil
	}
	return a.doc.ExposedEndpoints
}

// UnsetExposeSettings removes the expose settings for the provided list of
// endpoint names. If the resulting exposed endpoints map for the application
// becomes empty after the settings are removed, the application will be
// automatically unexposed.
//
// An error will be returned if an unknown endpoint name is specified or there
// is no existing expose settings entry for any of the provided endpoint names.
//
// See ClearExposed and IsExposed.
func (a *Application) UnsetExposeSettings(exposedEndpoints []string) error {
	bindings, _, err := readEndpointBindings(a.st, a.globalKey())
	if err != nil {
		return errors.Trace(err)
	}

	// Work on a copy so a failed validation leaves the doc untouched.
	mergedExposedEndpoints := make(map[string]ExposedEndpoint)
	for endpoint, exposeParams := range a.doc.ExposedEndpoints {
		mergedExposedEndpoints[endpoint] = exposeParams
	}

	for _, endpoint := range exposedEndpoints {
		// The empty endpoint ("") value represents all endpoints.
		if _, found := bindings[endpoint]; !found && endpoint != "" {
			return errors.NotFoundf("endpoint %q", endpoint)
		}

		if _, found := mergedExposedEndpoints[endpoint]; !found {
			return errors.BadRequestf("endpoint %q is not exposed", endpoint)
		}

		delete(mergedExposedEndpoints, endpoint)
	}

	return a.setExposed(
		// retain expose flag if we still have any expose settings left
		len(mergedExposedEndpoints) != 0,
		mergedExposedEndpoints,
	)
}

// MergeExposeSettings marks the application as exposed and merges the provided
// ExposedEndpoint details into the current set of expose settings. The merge
// operation overwrites expose settings for each existing endpoint name.
//
// See ClearExposed and IsExposed.
func (a *Application) MergeExposeSettings(exposedEndpoints map[string]ExposedEndpoint) error {
	bindings, _, err := readEndpointBindings(a.st, a.globalKey())
	if err != nil {
		return errors.Trace(err)
	}

	mergedExposedEndpoints := make(map[string]ExposedEndpoint)
	for endpoint, exposeParams := range a.doc.ExposedEndpoints {
		mergedExposedEndpoints[endpoint] = exposeParams
	}

	// Space infos are fetched lazily, only if a request references a space.
	var allSpaceInfos network.SpaceInfos
	for endpoint, exposeParams := range exposedEndpoints {
		// The empty endpoint ("") value represents all endpoints.
		if _, found := bindings[endpoint]; !found && endpoint != "" {
			return errors.NotFoundf("endpoint %q", endpoint)
		}

		// Verify expose parameters
		if len(exposeParams.ExposeToSpaceIDs) != 0 && allSpaceInfos == nil {
			if allSpaceInfos, err = a.st.AllSpaceInfos(); err != nil {
				return errors.Trace(err)
			}
		}

		exposeParams.ExposeToSpaceIDs = uniqueSortedStrings(exposeParams.ExposeToSpaceIDs)
		for _, spaceID := range exposeParams.ExposeToSpaceIDs {
			if allSpaceInfos.GetByID(spaceID) == nil {
				return errors.NotFoundf("space with ID %q", spaceID)
			}
		}

		exposeParams.ExposeToCIDRs = uniqueSortedStrings(exposeParams.ExposeToCIDRs)
		for _, cidr := range exposeParams.ExposeToCIDRs {
			if _, _, err := net.ParseCIDR(cidr); err != nil {
				return errors.Annotatef(err, "unable to parse %q as a CIDR", cidr)
			}
		}

		// If no spaces and CIDRs are provided, assume an implicit
		// 0.0.0.0/0 CIDR. This matches the "expose to the entire
		// world" behavior in juju controllers prior to 2.9.
		if len(exposeParams.ExposeToSpaceIDs)+len(exposeParams.ExposeToCIDRs) == 0 {
			exposeParams.ExposeToCIDRs = []string{firewall.AllNetworksIPV4CIDR, firewall.AllNetworksIPV6CIDR}
		}

		mergedExposedEndpoints[endpoint] = exposeParams
	}

	return a.setExposed(true, mergedExposedEndpoints)
}

// uniqueSortedStrings returns the unique values of in, sorted; nil if in is
// empty.
func uniqueSortedStrings(in []string) []string {
	if len(in) == 0 {
		return nil
	}

	return set.NewStrings(in...).SortedValues()
}

// ClearExposed removes the exposed flag from the application.
// See MergeExposeSettings and IsExposed.
991 func (a *Application) ClearExposed() error { 992 return a.setExposed(false, nil) 993 } 994 995 func (a *Application) setExposed(exposed bool, exposedEndpoints map[string]ExposedEndpoint) (err error) { 996 ops := []txn.Op{{ 997 C: applicationsC, 998 Id: a.doc.DocID, 999 Assert: isAliveDoc, 1000 Update: bson.D{{"$set", bson.D{ 1001 {"exposed", exposed}, 1002 {"exposed-endpoints", exposedEndpoints}, 1003 }}}, 1004 }} 1005 if err := a.st.db().RunTransaction(ops); err != nil { 1006 return errors.Errorf("cannot set exposed flag for application %q to %v: %v", a, exposed, onAbort(err, applicationNotAliveErr)) 1007 } 1008 a.doc.Exposed = exposed 1009 a.doc.ExposedEndpoints = exposedEndpoints 1010 return nil 1011 } 1012 1013 // Charm returns the application's charm and whether units should upgrade to that 1014 // charm even if they are in an error state. 1015 func (a *Application) Charm() (*Charm, bool, error) { 1016 if a.doc.CharmURL == nil { 1017 return nil, false, errors.NotFoundf("charm for application %q", a.doc.Name) 1018 } 1019 ch, err := a.st.Charm(*a.doc.CharmURL) 1020 if err != nil { 1021 return nil, false, err 1022 } 1023 return ch, a.doc.ForceCharm, nil 1024 } 1025 1026 // CharmOrigin returns the origin of a charm associated with a application. 1027 func (a *Application) CharmOrigin() *CharmOrigin { 1028 return &a.doc.CharmOrigin 1029 } 1030 1031 // IsPrincipal returns whether units of the application can 1032 // have subordinate units. 1033 func (a *Application) IsPrincipal() bool { 1034 return !a.doc.Subordinate 1035 } 1036 1037 // CharmModifiedVersion increases whenever the application's charm is changed in any 1038 // way. 1039 func (a *Application) CharmModifiedVersion() int { 1040 return a.doc.CharmModifiedVersion 1041 } 1042 1043 // CharmURL returns a string version of the application's charm URL, and 1044 // whether units should upgrade to the charm with that URL even if they are 1045 // in an error state. 
func (a *Application) CharmURL() (*string, bool) {
	return a.doc.CharmURL, a.doc.ForceCharm
}

// Endpoints returns the application's currently available relation endpoints.
func (a *Application) Endpoints() (eps []Endpoint, err error) {
	ch, _, err := a.Charm()
	if err != nil {
		return nil, err
	}
	// Note: the role argument is currently unused; each charm.Relation in
	// the meta maps already carries its own role.
	collect := func(role charm.RelationRole, rels map[string]charm.Relation) {
		for _, rel := range rels {
			eps = append(eps, Endpoint{
				ApplicationName: a.doc.Name,
				Relation:        rel,
			})
		}
	}

	meta := ch.Meta()
	if meta == nil {
		return nil, errors.Errorf("nil charm metadata for application %q", a.Name())
	}

	collect(charm.RolePeer, meta.Peers)
	collect(charm.RoleProvider, meta.Provides)
	collect(charm.RoleRequirer, meta.Requires)
	// Every application implicitly provides the juju-info interface.
	collect(charm.RoleProvider, map[string]charm.Relation{
		"juju-info": {
			Name:      "juju-info",
			Role:      charm.RoleProvider,
			Interface: "juju-info",
			Scope:     charm.ScopeGlobal,
		},
	})
	sort.Sort(epSlice(eps))
	return eps, nil
}

// Endpoint returns the relation endpoint with the supplied name, if it exists.
func (a *Application) Endpoint(relationName string) (Endpoint, error) {
	eps, err := a.Endpoints()
	if err != nil {
		return Endpoint{}, err
	}
	for _, ep := range eps {
		if ep.Name == relationName {
			return ep, nil
		}
	}
	return Endpoint{}, errors.Errorf("application %q has no %q relation", a, relationName)
}

// extraPeerRelations returns only the peer relations in newMeta not
// present in the application's current charm meta data.
func (a *Application) extraPeerRelations(newMeta *charm.Meta) map[string]charm.Relation {
	if newMeta == nil {
		// This should never happen, since we're checking the charm in SetCharm already.
		panic("newMeta is nil")
	}
	ch, _, err := a.Charm()
	if err != nil {
		return nil
	}
	newPeers := newMeta.Peers
	oldPeers := ch.Meta().Peers
	extraPeers := make(map[string]charm.Relation)
	for relName, rel := range newPeers {
		if _, ok := oldPeers[relName]; !ok {
			extraPeers[relName] = rel
		}
	}
	return extraPeers
}

// checkRelationsOps returns assert-only ops that pin the given relations in
// place, after verifying that the candidate charm still implements each of
// their endpoints.
func (a *Application) checkRelationsOps(ch *Charm, relations []*Relation) ([]txn.Op, error) {
	asserts := make([]txn.Op, 0, len(relations))

	// All relations must still exist and their endpoints are implemented by the charm.
	for _, rel := range relations {
		if ep, err := rel.Endpoint(a.doc.Name); err != nil {
			return nil, err
		} else if !ep.ImplementedBy(ch) {
			// When switching charms, we should allow peer
			// relations to be broken (e.g. because a newer charm
			// version removes a particular peer relation) even if
			// they are already established as those particular
			// relations will become irrelevant once the upgrade is
			// complete.
			if !isPeer(ep) {
				return nil, errors.Errorf("would break relation %q", rel)
			}
		}
		asserts = append(asserts, txn.Op{
			C:      relationsC,
			Id:     rel.doc.DocID,
			Assert: txn.DocExists,
		})
	}
	return asserts, nil
}

// checkStorageUpgrade verifies that the storage metadata changes between the
// old and new charm are safe (no required or in-use storage removed, no
// incompatible redefinition) and returns assert ops for the checks it made.
func (a *Application) checkStorageUpgrade(newMeta, oldMeta *charm.Meta, units []*Unit) (_ []txn.Op, err error) {
	// Make sure no storage instances are added or removed.

	sb, err := NewStorageBackend(a.st)
	if err != nil {
		return nil, errors.Trace(err)
	}

	var ops []txn.Op
	for name, oldStorageMeta := range oldMeta.Storage {
		if _, ok := newMeta.Storage[name]; ok {
			continue
		}
		if oldStorageMeta.CountMin > 0 {
			return nil, errors.Errorf("required storage %q removed", name)
		}
		// Optional storage has been removed. So long as there
		// are no instances of the store, it can safely be
		// removed.
		if oldStorageMeta.Shared {
			op, n, err := sb.countEntityStorageInstances(a.Tag(), name)
			if err != nil {
				return nil, errors.Trace(err)
			}
			if n > 0 {
				return nil, errors.Errorf("in-use storage %q removed", name)
			}
			ops = append(ops, op)
		} else {
			// Non-shared storage is owned per unit, so count per unit.
			for _, u := range units {
				op, n, err := sb.countEntityStorageInstances(u.Tag(), name)
				if err != nil {
					return nil, errors.Trace(err)
				}
				if n > 0 {
					return nil, errors.Errorf("in-use storage %q removed", name)
				}
				ops = append(ops, op)
			}
		}
	}
	// less compares counts where -1 means "unbounded".
	less := func(a, b int) bool {
		return a != -1 && (b == -1 || a < b)
	}
	for name, newStorageMeta := range newMeta.Storage {
		oldStorageMeta, ok := oldMeta.Storage[name]
		if !ok {
			continue
		}
		if newStorageMeta.Type != oldStorageMeta.Type {
			return nil, errors.Errorf(
				"existing storage %q type changed from %q to %q",
				name, oldStorageMeta.Type, newStorageMeta.Type,
			)
		}
		if newStorageMeta.Shared != oldStorageMeta.Shared {
			return nil, errors.Errorf(
				"existing storage %q shared changed from %v to %v",
				name, oldStorageMeta.Shared, newStorageMeta.Shared,
			)
		}
		if newStorageMeta.ReadOnly != oldStorageMeta.ReadOnly {
			return nil, errors.Errorf(
				"existing storage %q read-only changed from %v to %v",
				name, oldStorageMeta.ReadOnly, newStorageMeta.ReadOnly,
			)
		}
		if newStorageMeta.Location != oldStorageMeta.Location {
			return nil, errors.Errorf(
				"existing storage %q location changed from %q to %q",
				name, oldStorageMeta.Location, newStorageMeta.Location,
			)
		}
		if less(newStorageMeta.CountMax, oldStorageMeta.CountMax) {
			var oldCountMax interface{} = oldStorageMeta.CountMax
			if oldStorageMeta.CountMax == -1 {
				oldCountMax = "<unbounded>"
			}
			return nil, errors.Errorf(
				"existing storage %q range contracted: max decreased from %v to %d",
				name, oldCountMax, newStorageMeta.CountMax,
			)
		}
		if oldStorageMeta.Location != "" && oldStorageMeta.CountMax == 1 && newStorageMeta.CountMax != 1 {
			// If a location is specified, the store may not go
			// from being a singleton to multiple, since then the
			// location has a different meaning.
			return nil, errors.Errorf(
				"existing storage %q with location changed from single to multiple",
				name,
			)
		}
	}
	return ops, nil
}

// IsSidecar returns true when using new CAAS charms in sidecar mode.
func (a *Application) IsSidecar() (bool, error) {
	ch, _, err := a.Charm()
	if err != nil {
		return false, errors.Trace(err)
	}
	meta := ch.Meta()
	if meta == nil {
		return false, nil
	}
	m, err := a.st.Model()
	if err != nil {
		return false, errors.Trace(err)
	}

	// TODO(sidecar): Determine a better way represent this.
	return m.Type() == ModelTypeCAAS && charm.MetaFormat(ch) == charm.FormatV2, nil
}

// changeCharmOps returns the operations necessary to set a application's
// charm URL to a new value.
func (a *Application) changeCharmOps(
	ch *Charm,
	updatedSettings charm.Settings,
	forceUnits bool,
	updatedStorageConstraints map[string]StorageConstraints,
) ([]txn.Op, error) {
	// Build the new application config from what can be used of the old one.
	var newSettings charm.Settings
	oldKey, err := readSettings(a.st.db(), settingsC, a.charmConfigKey())
	if err == nil {
		// Filter the old settings through to get the new settings.
		newSettings = ch.Config().FilterSettings(oldKey.Map())
		for k, v := range updatedSettings {
			newSettings[k] = v
		}
	} else if errors.IsNotFound(err) {
		// No old settings, start with the updated settings.
		newSettings = updatedSettings
	} else {
		return nil, errors.Annotatef(err, "application %q", a.doc.Name)
	}

	cURL := ch.URL()
	// Create or replace application settings.
	var settingsOp txn.Op
	newSettingsKey := applicationCharmConfigKey(a.doc.Name, &cURL)
	if _, err := readSettings(a.st.db(), settingsC, newSettingsKey); errors.IsNotFound(err) {
		// No settings for this key yet, create it.
		settingsOp = createSettingsOp(settingsC, newSettingsKey, newSettings)
	} else if err != nil {
		return nil, errors.Annotatef(err, "application %q", a.doc.Name)
	} else {
		// Settings exist, just replace them with the new ones.
		settingsOp, _, err = replaceSettingsOp(a.st.db(), settingsC, newSettingsKey, newSettings)
		if err != nil {
			return nil, errors.Annotatef(err, "application %q", a.doc.Name)
		}
	}

	// Make sure no units are added or removed while the upgrade
	// transaction is being executed. This allows us to make
	// changes to units during the upgrade, e.g. add storage
	// to existing units, or remove optional storage so long as
	// it is unreferenced.
	units, err := a.AllUnits()
	if err != nil {
		return nil, errors.Trace(err)
	}
	unitOps := make([]txn.Op, len(units))
	for i, u := range units {
		unitOps[i] = txn.Op{
			C:      unitsC,
			Id:     u.doc.DocID,
			Assert: txn.DocExists,
		}
	}
	unitOps = append(unitOps, txn.Op{
		C:      applicationsC,
		Id:     a.doc.DocID,
		Assert: bson.D{{"unitcount", len(units)}},
	})

	checkStorageOps, upgradeStorageOps, storageConstraintsOps, err := a.newCharmStorageOps(ch, units, updatedStorageConstraints)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Add or create a reference to the new charm, settings,
	// and storage constraints docs.
	incOps, err := appCharmIncRefOps(a.st, a.doc.Name, &cURL, true)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var decOps []txn.Op
	// Drop the references to the old settings, storage constraints,
	// and charm docs (if the refs actually exist yet).
	if oldKey != nil {
		// Since we can force this now, let's.. There is no point hanging on
		// to the old key.
		op := &ForcedOperation{Force: true}
		decOps, err = appCharmDecRefOps(a.st, a.doc.Name, a.doc.CharmURL, true, op) // current charm
		if err != nil {
			return nil, errors.Annotatef(err, "could not remove old charm references for %v", oldKey)
		}
		if len(op.Errors) != 0 {
			logger.Errorf("could not remove old charm references for %v:%v", oldKey, op.Errors)
		}
	}

	// Build the transaction.
	var ops []txn.Op
	if oldKey != nil {
		// Old settings shouldn't change (when they exist).
		ops = append(ops, oldKey.assertUnchangedOp())
	}
	ops = append(ops, unitOps...)
	ops = append(ops, incOps...)
	ops = append(ops, []txn.Op{
		// Create or replace new settings.
		settingsOp,
		// Update the charm URL and force flag (if relevant).
		{
			C:  applicationsC,
			Id: a.doc.DocID,
			Update: bson.D{{"$set", bson.D{
				{"charmurl", cURL},
				{"forcecharm", forceUnits},
			}}},
		},
	}...)
	ops = append(ops, storageConstraintsOps...)
	ops = append(ops, checkStorageOps...)
	ops = append(ops, upgradeStorageOps...)

	ops = append(ops, incCharmModifiedVersionOps(a.doc.DocID)...)

	// Get all relations - we need to check them later.
	relations, err := a.Relations()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Remove any stale peer relation entries when switching charms
	removeStalePeerOps, err := a.st.removeStalePeerRelationsOps(a.doc.Name, relations, ch.Meta())
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, removeStalePeerOps...)

	// Add any extra peer relations that need creation.
	newPeers := a.extraPeerRelations(ch.Meta())
	addPeerOps, err := a.st.addPeerRelationsOps(a.doc.Name, newPeers)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, addPeerOps...)

	// Update the relation count as well.
	if len(newPeers) > 0 {
		// Make sure the relation count does not change.
		sameRelCount := bson.D{{"relationcount", len(relations)}}
		ops = append(ops, txn.Op{
			C:      applicationsC,
			Id:     a.doc.DocID,
			Assert: append(notDeadDoc, sameRelCount...),
			Update: bson.D{{"$inc", bson.D{{"relationcount", len(newPeers)}}}},
		})
	}
	// Check relations to ensure no active relations are removed.
	relOps, err := a.checkRelationsOps(ch, relations)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ops = append(ops, relOps...)

	// And finally, decrement the old charm and settings.
	return append(ops, decOps...), nil
}

// bindingsForOps returns a Bindings object intended for createOps and updateOps
// only.
func (a *Application) bindingsForOps(bindings map[string]string) (*Bindings, error) {
	// Call NewBindings first to ensure this map contains space ids
	b, err := NewBindings(a.st, bindings)
	if err != nil {
		return nil, err
	}
	b.app = a
	return b, nil
}

// DeployedMachines returns the collection of machines
// that this application has units deployed to.
func (a *Application) DeployedMachines() ([]*Machine, error) {
	units, err := a.AllUnits()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Track seen machine ids so each machine appears only once even when
	// several units share it.
	machineIds := set.NewStrings()
	var machines []*Machine
	for _, u := range units {
		// AssignedMachineId returns the correct machine
		// whether principal or subordinate.
		id, err := u.AssignedMachineId()
		if err != nil {
			if errors.IsNotAssigned(err) {
				// We aren't interested in this unit at this time.
				continue
			}
			return nil, errors.Trace(err)
		}
		if machineIds.Contains(id) {
			continue
		}

		m, err := a.st.Machine(id)
		if err != nil {
			return nil, errors.Trace(err)
		}
		machineIds.Add(id)
		machines = append(machines, m)
	}
	return machines, nil
}

// newCharmStorageOps validates and computes the storage changes implied by
// switching to charm ch, returning three op slices: storage checks, unit
// storage upgrades, and the create/replace op for the storage constraints doc.
func (a *Application) newCharmStorageOps(
	ch *Charm,
	units []*Unit,
	updatedStorageConstraints map[string]StorageConstraints,
) ([]txn.Op, []txn.Op, []txn.Op, error) {

	fail := func(err error) ([]txn.Op, []txn.Op, []txn.Op, error) {
		return nil, nil, nil, errors.Trace(err)
	}

	// Check storage to ensure no referenced storage is removed, or changed
	// in an incompatible way. We do this before computing the new storage
	// constraints, as incompatible charm changes will otherwise yield
	// confusing error messages that would suggest the user has supplied
	// invalid constraints.
	sb, err := NewStorageBackend(a.st)
	if err != nil {
		return fail(err)
	}
	oldCharm, _, err := a.Charm()
	if err != nil {
		return fail(err)
	}
	oldMeta := oldCharm.Meta()
	checkStorageOps, err := a.checkStorageUpgrade(ch.Meta(), oldMeta, units)
	if err != nil {
		return fail(err)
	}

	// Create or replace storage constraints. We take the existing storage
	// constraints, remove any keys that are no longer referenced by the
	// charm, and update the constraints that the user has specified.
	var storageConstraintsOp txn.Op
	oldStorageConstraints, err := a.StorageConstraints()
	if err != nil {
		return fail(err)
	}
	// Note: this mutates the map returned by StorageConstraints in place.
	newStorageConstraints := oldStorageConstraints
	for name, cons := range updatedStorageConstraints {
		newStorageConstraints[name] = cons
	}
	for name := range newStorageConstraints {
		if _, ok := ch.Meta().Storage[name]; !ok {
			delete(newStorageConstraints, name)
		}
	}
	if err := addDefaultStorageConstraints(sb, newStorageConstraints, ch.Meta()); err != nil {
		return fail(errors.Annotate(err, "adding default storage constraints"))
	}
	if err := validateStorageConstraints(sb, newStorageConstraints, ch.Meta()); err != nil {
		return fail(errors.Annotate(err, "validating storage constraints"))
	}
	cURL := ch.URL()
	newStorageConstraintsKey := applicationStorageConstraintsKey(a.doc.Name, &cURL)
	if _, err := readStorageConstraints(sb.mb, newStorageConstraintsKey); errors.IsNotFound(err) {
		storageConstraintsOp = createStorageConstraintsOp(
			newStorageConstraintsKey, newStorageConstraints,
		)
	} else if err != nil {
		return fail(err)
	} else {
		storageConstraintsOp = replaceStorageConstraintsOp(
			newStorageConstraintsKey, newStorageConstraints,
		)
	}

	// Upgrade charm storage.
	upgradeStorageOps, err := a.upgradeStorageOps(ch.Meta(), oldMeta, units, newStorageConstraints)
	if err != nil {
		return fail(err)
	}
	return checkStorageOps, upgradeStorageOps, []txn.Op{storageConstraintsOp}, nil
}

// upgradeStorageOps returns ops that top up each unit's storage instances to
// the new charm's minimum requirements.
func (a *Application) upgradeStorageOps(
	meta, oldMeta *charm.Meta,
	units []*Unit,
	allStorageCons map[string]StorageConstraints,
) (_ []txn.Op, err error) {

	sb, err := NewStorageBackend(a.st)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// For each store, ensure that every unit has the minimum requirements.
	// If a unit has an existing store, but its minimum count has been
	// increased, we only add the shortfall; we do not necessarily add as
	// many instances as are specified in the storage constraints.
	var ops []txn.Op
	for name, cons := range allStorageCons {
		for _, u := range units {
			countMin := meta.Storage[name].CountMin
			if _, ok := oldMeta.Storage[name]; !ok {
				// The store did not exist previously, so we
				// create the full amount specified in the
				// constraints.
				countMin = int(cons.Count)
			}
			_, unitOps, err := sb.addUnitStorageOps(
				meta, u, name, cons, countMin,
			)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, unitOps...)
		}
	}
	return ops, nil
}

// incCharmModifiedVersionOps returns the operations necessary to increment
// the CharmModifiedVersion field for the given application.
func incCharmModifiedVersionOps(applicationID string) []txn.Op {
	return []txn.Op{{
		C:      applicationsC,
		Id:     applicationID,
		Assert: txn.DocExists,
		Update: bson.D{{"$inc", bson.D{{"charmmodifiedversion", 1}}}},
	}}
}

// resolveResourceOps returns ops that resolve the given pending resources
// against this application.
func (a *Application) resolveResourceOps(pendingResourceIDs map[string]string) ([]txn.Op, error) {
	// Collect pending resource resolution operations.
	resources := a.st.Resources().(*resourcePersistence)
	return resources.resolveApplicationPendingResourcesOps(a.doc.Name, pendingResourceIDs)
}

// SetCharmConfig contains the parameters for Application.SetCharm.
type SetCharmConfig struct {
	// Charm is the new charm to use for the application. New units
	// will be started with this charm, and existing units will be
	// upgraded to use it.
	Charm *Charm

	// CharmOrigin is the data for where the charm comes from. Eventually
	// Channel should be moved there.
	CharmOrigin *CharmOrigin

	// ConfigSettings is the charm config settings to apply when upgrading
	// the charm.
	ConfigSettings charm.Settings

	// ForceUnits forces the upgrade on units in an error state.
	ForceUnits bool

	// ForceBase forces the use of the charm even if it is not one of
	// the charm's supported series.
	ForceBase bool

	// Force forces the overriding of the lxd profile validation even if the
	// profile doesn't validate.
	Force bool

	// PendingResourceIDs is a map of resource names to resource IDs to activate during
	// the upgrade.
	PendingResourceIDs map[string]string

	// StorageConstraints contains the storage constraints to add or update when
	// upgrading the charm.
	//
	// Any existing storage instances for the named stores will be
	// unaffected; the storage constraints will only be used for
	// provisioning new storage instances.
	StorageConstraints map[string]StorageConstraints

	// EndpointBindings is an operator-defined map of endpoint names to
	// space names that should be merged with any existing bindings.
	EndpointBindings map[string]string

	// RequireNoUnits is set when upgrading from podspec to sidecar charm to ensure
	// the application is scaled to 0 units first.
	RequireNoUnits bool
}

// validateSetCharmConfig performs the static validation of a SetCharm
// request: subordinacy, charm origin, deployment info, base compatibility
// and the lxd profile.
func (a *Application) validateSetCharmConfig(cfg SetCharmConfig) error {
	if cfg.Charm.Meta().Subordinate != a.doc.Subordinate {
		return errors.Errorf("cannot change an application's subordinacy")
	}
	origin := cfg.CharmOrigin
	if origin == nil {
		return errors.NotValidf("nil charm origin")
	}
	if origin.Platform == nil {
		return errors.BadRequestf("charm origin platform is nil")
	}
	// ID and Hash are set together once the charm has been downloaded.
	if (origin.ID != "" && origin.Hash == "") || (origin.ID == "" && origin.Hash != "") {
		return errors.BadRequestf("programming error, SetCharm, neither CharmOrigin ID nor Hash can be set before a charm is downloaded. See CharmHubRepository GetDownloadURL.")
	}

	currentCharm, err := a.st.Charm(*a.doc.CharmURL)
	if err != nil {
		return errors.Trace(err)
	}
	if cfg.Charm.Meta().Deployment != currentCharm.Meta().Deployment {
		if cfg.Charm.Meta().Deployment == nil || currentCharm.Meta().Deployment == nil {
			return errors.New("cannot change a charm's deployment info")
		}
		if cfg.Charm.Meta().Deployment.DeploymentType != currentCharm.Meta().Deployment.DeploymentType {
			return errors.New("cannot change a charm's deployment type")
		}
		if cfg.Charm.Meta().Deployment.DeploymentMode != currentCharm.Meta().Deployment.DeploymentMode {
			return errors.New("cannot change a charm's deployment mode")
		}
	}

	// If it's a v1 or v2 machine charm (no containers), check series.
	if charm.MetaFormat(cfg.Charm) == charm.FormatV1 || !corecharm.IsKubernetes(cfg.Charm) {
		err := checkBaseForSetCharm(a.CharmOrigin().Platform, cfg.Charm, cfg.ForceBase)
		if err != nil {
			return errors.Trace(err)
		}
	}

	// we don't need to check that this is a charm.LXDProfiler, as we can
	// state that the function exists.
	if profile := cfg.Charm.LXDProfile(); profile != nil {
		// Validate the config devices, to ensure we don't apply an invalid
		// profile, if we know it's never going to work.
		// TODO (stickupkid): Validation of config devices is totally in the
		// wrong place. Validation should be done at the API server layer, not
		// at the state layer.
		if err := profile.ValidateConfigDevices(); err != nil && !cfg.Force {
			return errors.Annotate(err, "validating lxd profile")
		}
	}
	return nil
}

// SetCharm changes the charm for the application.
func (a *Application) SetCharm(cfg SetCharmConfig) (err error) {
	defer errors.DeferredAnnotatef(
		&err, "cannot upgrade application %q to charm %q", a, cfg.Charm.URL(),
	)

	// Validate the input. ValidateSettings validates and transforms
	// leaving it here.
	if err := a.validateSetCharmConfig(cfg); err != nil {
		return errors.Trace(err)
	}

	updatedSettings, err := cfg.Charm.Config().ValidateSettings(cfg.ConfigSettings)
	if err != nil {
		return errors.Annotate(err, "validating config settings")
	}

	var newCharmModifiedVersion int
	// Work on a copy so retries refresh the copy, not the receiver; the
	// receiver is refreshed once at the end.
	acopy := &Application{a.st, a.doc}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		a := acopy
		if attempt > 0 {
			if err := a.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
		}

		// NOTE: We're explicitly allowing SetCharm to succeed
		// when the application is Dying, because application/charm
		// upgrades should still be allowed to apply to dying
		// applications and units, so that bugs in departed/broken
		// hooks can be addressed at runtime.
		if a.Life() == Dead {
			return nil, stateerrors.ErrDead
		}

		// Record the current value of charmModifiedVersion, so we can
		// set the value on the method receiver's in-memory document
		// structure. We increment the version only when we change the
		// charm URL.
		newCharmModifiedVersion = a.doc.CharmModifiedVersion

		ops := []txn.Op{{
			C:  applicationsC,
			Id: a.doc.DocID,
			Assert: append(notDeadDoc, bson.DocElem{
				"charmmodifiedversion", a.doc.CharmModifiedVersion,
			}),
		}}

		if *a.doc.CharmURL == cfg.Charm.URL() {
			updates := bson.D{
				{"forcecharm", cfg.ForceUnits},
			}
			// Charm URL already set; just update the force flag.
			ops = append(ops, txn.Op{
				C:      applicationsC,
				Id:     a.doc.DocID,
				Assert: txn.DocExists,
				Update: bson.D{{"$set", updates}},
			})
		} else {
			// Check if the new charm specifies a relation max limit
			// that cannot be satisfied by the currently established
			// relation count.
			quotaErr := a.preUpgradeRelationLimitCheck(cfg.Charm)

			// If the operator specified --force, we still allow
			// the upgrade to continue with a warning.
			if errors.IsQuotaLimitExceeded(quotaErr) && cfg.Force {
				logger.Warningf("%v; allowing upgrade to proceed as the operator specified --force", quotaErr)
			} else if quotaErr != nil {
				return nil, errors.Trace(quotaErr)
			}

			chng, err := a.changeCharmOps(
				cfg.Charm,
				updatedSettings,
				cfg.ForceUnits,
				cfg.StorageConstraints,
			)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, chng...)
			newCharmModifiedVersion++
		}

		// Resources can be upgraded independent of a charm upgrade.
		resourceOps, err := a.resolveResourceOps(cfg.PendingResourceIDs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops = append(ops, resourceOps...)
		// Only update newCharmModifiedVersion once. It might have been
		// incremented in changeCharmOps.
		if len(resourceOps) > 0 && newCharmModifiedVersion == a.doc.CharmModifiedVersion {
			ops = append(ops, incCharmModifiedVersionOps(a.doc.DocID)...)
			newCharmModifiedVersion++
		}

		// Update the charm origin
		ops = append(ops, txn.Op{
			C:      applicationsC,
			Id:     a.doc.DocID,
			Assert: txn.DocExists,
			Update: bson.D{{"$set", bson.D{
				{"charm-origin", *cfg.CharmOrigin},
			}}},
		})

		if cfg.RequireNoUnits {
			if a.UnitCount()+a.GetScale() > 0 {
				return nil, stateerrors.ErrApplicationShouldNotHaveUnits
			}
			ops = append(ops, txn.Op{
				C:      applicationsC,
				Id:     a.doc.DocID,
				Assert: bson.D{{"scale", 0}, {"unitcount", 0}},
			})
		}

		// Always update bindings regardless of whether we upgrade to a
		// new version or stay at the previous version.
		currentMap, txnRevno, err := readEndpointBindings(a.st, a.globalKey())
		if err != nil && !errors.IsNotFound(err) {
			return ops, errors.Trace(err)
		}
		b, err := a.bindingsForOps(currentMap)
		if err != nil {
			return nil, errors.Trace(err)
		}
		endpointBindingsOps, err := b.updateOps(txnRevno, cfg.EndpointBindings, cfg.Charm.Meta(), cfg.Force)
		if err == nil {
			ops = append(ops, endpointBindingsOps...)
		} else if !errors.IsNotFound(err) && err != jujutxn.ErrNoOperations {
			// If endpoint bindings do not exist this most likely means the application
			// itself no longer exists, which will be caught soon enough anyway.
			// ErrNoOperations on the other hand means there's nothing to update.
			return nil, errors.Trace(err)
		}
		return ops, nil
	}

	if err := a.st.db().Run(buildTxn); err != nil {
		return err
	}
	return a.Refresh()
}

// SetDownloadedIDAndHash updates the application's charm origin with ID and
// hash values. This should ONLY be done from the async downloader.
// The hash cannot be updated if the charm origin has no ID, nor was one
// provided as an argument. The ID cannot be changed.
1854 func (a *Application) SetDownloadedIDAndHash(id, hash string) error { 1855 if id == "" && hash == "" { 1856 return errors.BadRequestf("ID, %q, and hash, %q, must have values", id, hash) 1857 } 1858 if id != "" && a.doc.CharmOrigin.ID != "" && a.doc.CharmOrigin.ID != id { 1859 return errors.BadRequestf("application ID cannot be changed %q, %q", a.doc.CharmOrigin.ID, id) 1860 } 1861 if id != "" && hash == "" { 1862 return errors.BadRequestf("programming error, SetDownloadedIDAndHash, cannot have an ID without a hash after downloading. See CharmHubRepository GetDownloadURL.") 1863 } 1864 buildTxn := func(attempt int) ([]txn.Op, error) { 1865 if attempt > 0 { 1866 if err := a.Refresh(); err != nil { 1867 return nil, errors.Trace(err) 1868 } 1869 } 1870 if a.Life() != Alive { 1871 return nil, errors.New("application is not alive") 1872 } 1873 ops := []txn.Op{{ 1874 C: applicationsC, 1875 Id: a.doc.DocID, 1876 Assert: isAliveDoc, 1877 }} 1878 if id != "" { 1879 ops = append(ops, txn.Op{ 1880 C: applicationsC, 1881 Id: a.doc.DocID, 1882 Assert: txn.DocExists, 1883 Update: bson.D{{"$set", bson.D{ 1884 {"charm-origin.id", id}, 1885 }}}, 1886 }) 1887 } 1888 if hash != "" { 1889 ops = append(ops, txn.Op{ 1890 C: applicationsC, 1891 Id: a.doc.DocID, 1892 Assert: txn.DocExists, 1893 Update: bson.D{{"$set", bson.D{ 1894 {"charm-origin.hash", hash}, 1895 }}}, 1896 }) 1897 } 1898 return ops, nil 1899 } 1900 if err := a.st.db().Run(buildTxn); err != nil { 1901 return err 1902 } 1903 if id != "" { 1904 a.doc.CharmOrigin.ID = id 1905 } 1906 if hash != "" { 1907 a.doc.CharmOrigin.Hash = hash 1908 } 1909 return nil 1910 } 1911 1912 // checkBaseForSetCharm verifies that the 1913 func checkBaseForSetCharm(currentPlatform *Platform, ch *Charm, ForceBase bool) error { 1914 curBase, err := corebase.ParseBase(currentPlatform.OS, currentPlatform.Channel) 1915 if err != nil { 1916 return errors.Trace(err) 1917 } 1918 if !ForceBase { 1919 return 
errors.Trace(corecharm.BaseIsCompatibleWithCharm(curBase, ch)) 1920 } 1921 // Even with forceBase=true, we do not allow a charm to be used which is for 1922 // a different OS. 1923 return errors.Trace(corecharm.OSIsCompatibleWithCharm(curBase.OS, ch)) 1924 } 1925 1926 // preUpgradeRelationLimitCheck ensures that the already established relation 1927 // counts do not violate the max relation limits specified by the charm version 1928 // we are attempting to upgrade to. 1929 func (a *Application) preUpgradeRelationLimitCheck(newCharm *Charm) error { 1930 var ( 1931 existingRels []*Relation 1932 err error 1933 ) 1934 1935 for relName, relSpec := range newCharm.Meta().CombinedRelations() { 1936 if relSpec.Limit == 0 { 1937 continue 1938 } 1939 1940 // Load and memoize relation list 1941 if existingRels == nil { 1942 if existingRels, err = a.Relations(); err != nil { 1943 return errors.Trace(err) 1944 } 1945 1946 } 1947 1948 establishedCount := establishedRelationCount(existingRels, a.Name(), relSpec) 1949 if establishedCount > relSpec.Limit { 1950 return errors.QuotaLimitExceededf("new charm version imposes a maximum relation limit of %d for %s:%s which cannot be satisfied by the number of already established relations (%d)", relSpec.Limit, a.Name(), relName, establishedCount) 1951 } 1952 } 1953 1954 return nil 1955 } 1956 1957 // establishedRelationCount returns the number of already established relations 1958 // for appName and the endpoint specified in the provided relation details. 
1959 func establishedRelationCount(existingRelList []*Relation, appName string, rel charm.Relation) int { 1960 var establishedCount int 1961 for _, existingRel := range existingRelList { 1962 // Suspended relations don't count 1963 if existingRel.Suspended() { 1964 continue 1965 } 1966 1967 for _, existingRelEp := range existingRel.Endpoints() { 1968 if existingRelEp.ApplicationName == appName && 1969 existingRelEp.Relation.Name == rel.Name && 1970 existingRelEp.Relation.Interface == rel.Interface { 1971 establishedCount++ 1972 break 1973 } 1974 } 1975 } 1976 1977 return establishedCount 1978 } 1979 1980 // MergeBindings merges the provided bindings map with the existing application 1981 // bindings. 1982 func (a *Application) MergeBindings(operatorBindings *Bindings, force bool) error { 1983 buildTxn := func(attempt int) ([]txn.Op, error) { 1984 if attempt > 0 { 1985 if err := a.Refresh(); err != nil { 1986 return nil, errors.Trace(err) 1987 } 1988 } 1989 1990 ch, _, err := a.Charm() 1991 if err != nil { 1992 return nil, errors.Trace(err) 1993 } 1994 1995 currentMap, txnRevno, err := readEndpointBindings(a.st, a.globalKey()) 1996 if err != nil && !errors.IsNotFound(err) { 1997 return nil, errors.Trace(err) 1998 } 1999 b, err := a.bindingsForOps(currentMap) 2000 if err != nil { 2001 return nil, errors.Trace(err) 2002 } 2003 endpointBindingsOps, err := b.updateOps(txnRevno, operatorBindings.Map(), ch.Meta(), force) 2004 if err != nil && !errors.IsNotFound(err) && err != jujutxn.ErrNoOperations { 2005 return nil, errors.Trace(err) 2006 } 2007 2008 return endpointBindingsOps, err 2009 } 2010 2011 err := a.st.db().Run(buildTxn) 2012 return errors.Annotatef(err, "merging application bindings") 2013 } 2014 2015 // unitAppName returns the name of the Application, given a Unit's name. 
2016 func unitAppName(unitName string) string { 2017 unitParts := strings.Split(unitName, "/") 2018 return unitParts[0] 2019 } 2020 2021 // UpdateApplicationBase updates the base for the Application. 2022 func (a *Application) UpdateApplicationBase(newBase Base, force bool) (err error) { 2023 buildTxn := func(attempt int) ([]txn.Op, error) { 2024 if attempt > 0 { 2025 // If we've tried once already and failed, re-evaluate the criteria. 2026 if err := a.Refresh(); err != nil { 2027 return nil, errors.Trace(err) 2028 } 2029 } 2030 // Exit early if the Application series doesn't need to change 2031 if err := a.Refresh(); err != nil { 2032 return nil, errors.Trace(err) 2033 } 2034 appOrigin := a.CharmOrigin() 2035 appBase, err := corebase.ParseBase(appOrigin.Platform.OS, appOrigin.Platform.Channel) 2036 if err != nil { 2037 return nil, errors.Trace(err) 2038 } 2039 newAppBase, err := corebase.ParseBase(newBase.OS, newBase.Channel) 2040 if err != nil { 2041 return nil, errors.Trace(err) 2042 } 2043 sameOrigin := appBase.DisplayString() == newAppBase.DisplayString() 2044 if sameOrigin { 2045 return nil, jujutxn.ErrNoOperations 2046 } 2047 2048 // Verify and gather data for the transaction operations. 2049 if !force { 2050 err = a.VerifySupportedBase(newBase) 2051 if err != nil { 2052 return nil, err 2053 } 2054 } 2055 units, err := a.AllUnits() 2056 if err != nil { 2057 return nil, errors.Trace(err) 2058 } 2059 var subApps []*Application 2060 var unit *Unit 2061 2062 if len(units) > 0 { 2063 // All units have the same subordinates... 
2064 unit = units[0] 2065 for _, n := range unit.SubordinateNames() { 2066 app, err := a.st.Application(unitAppName(n)) 2067 if err != nil { 2068 return nil, err 2069 } 2070 if !force { 2071 err = app.VerifySupportedBase(newBase) 2072 if err != nil { 2073 return nil, err 2074 } 2075 } 2076 subApps = append(subApps, app) 2077 } 2078 } 2079 2080 //Create the transaction operations 2081 ops := []txn.Op{{ 2082 C: applicationsC, 2083 Id: a.doc.DocID, 2084 Assert: bson.D{{"life", Alive}, 2085 {"charmurl", a.doc.CharmURL}, 2086 {"unitcount", a.doc.UnitCount}}, 2087 Update: bson.D{{"$set", bson.D{{ 2088 "charm-origin.platform.channel", newAppBase.Channel.String()}}}}, 2089 }} 2090 2091 if unit != nil { 2092 ops = append(ops, txn.Op{ 2093 C: unitsC, 2094 Id: unit.doc.DocID, 2095 Assert: bson.D{{"life", Alive}, 2096 {"subordinates", unit.SubordinateNames()}}, 2097 }) 2098 } 2099 2100 for _, sub := range subApps { 2101 ops = append(ops, txn.Op{ 2102 C: applicationsC, 2103 Id: sub.doc.DocID, 2104 Assert: bson.D{{"life", Alive}, 2105 {"charmurl", sub.doc.CharmURL}, 2106 {"unitcount", sub.doc.UnitCount}}, 2107 Update: bson.D{{"$set", bson.D{{ 2108 "charm-origin.platform.channel", newAppBase.Channel.String()}}}}, 2109 }) 2110 } 2111 return ops, nil 2112 } 2113 2114 err = a.st.db().Run(buildTxn) 2115 return errors.Annotatef(err, "updating application base") 2116 } 2117 2118 // VerifySupportedBase verifies if the given base is supported by the 2119 // application. 2120 // TODO (stickupkid): This will be removed once we align all upgrade-machine 2121 // commands. 2122 func (a *Application) VerifySupportedBase(b Base) error { 2123 ch, _, err := a.Charm() 2124 if err != nil { 2125 return err 2126 } 2127 base, err := corebase.ParseBase(b.OS, b.Channel) 2128 if err != nil { 2129 return err 2130 } 2131 return corecharm.BaseIsCompatibleWithCharm(base, ch) 2132 } 2133 2134 // String returns the application name. 
2135 func (a *Application) String() string { 2136 return a.doc.Name 2137 } 2138 2139 // Refresh refreshes the contents of the Application from the underlying 2140 // state. It returns an error that satisfies errors.IsNotFound if the 2141 // application has been removed. 2142 func (a *Application) Refresh() error { 2143 applications, closer := a.st.db().GetCollection(applicationsC) 2144 defer closer() 2145 2146 err := applications.FindId(a.doc.DocID).One(&a.doc) 2147 if err == mgo.ErrNotFound { 2148 return errors.NotFoundf("application %q", a) 2149 } 2150 if err != nil { 2151 return errors.Errorf("cannot refresh application %q: %v", a, err) 2152 } 2153 return nil 2154 } 2155 2156 // GetPlacement returns the application's placement directive. 2157 // This is used on CAAS models. 2158 func (a *Application) GetPlacement() string { 2159 return a.doc.Placement 2160 } 2161 2162 // GetScale returns the application's desired scale value. 2163 // This is used on CAAS models. 2164 func (a *Application) GetScale() int { 2165 return a.doc.DesiredScale 2166 } 2167 2168 // ChangeScale alters the existing scale by the provided change amount, returning the new amount. 2169 // This is used on CAAS models. 
func (a *Application) ChangeScale(scaleChange int) (int, error) {
	// Derive the target scale from the cached doc; it is re-derived on
	// each txn retry below after a Refresh.
	newScale := a.doc.DesiredScale + scaleChange
	logger.Tracef("ChangeScale DesiredScale %v, scaleChange %v, newScale %v", a.doc.DesiredScale, scaleChange, newScale)
	if newScale < 0 {
		return a.doc.DesiredScale, errors.NotValidf("cannot remove more units than currently exist")
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			// Re-read state before retrying: the cached doc may be
			// stale, which would keep aborting the assertions below.
			if err := a.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
			alive, err := isAlive(a.st, applicationsC, a.doc.DocID)
			if err != nil {
				return nil, errors.Trace(err)
			} else if !alive {
				return nil, applicationNotAliveErr
			}
			// Recompute the target against the freshly read scale.
			newScale = a.doc.DesiredScale + scaleChange
			if newScale < 0 {
				return nil, errors.NotValidf("cannot remove more units than currently exist")
			}
		}
		// Assert on life, charm URL, unit count and current scale so any
		// concurrent change aborts the txn and triggers a retry.
		ops := []txn.Op{{
			C:  applicationsC,
			Id: a.doc.DocID,
			Assert: bson.D{
				{"life", Alive},
				{"charmurl", a.doc.CharmURL},
				{"unitcount", a.doc.UnitCount},
				{"scale", a.doc.DesiredScale},
			},
			Update: bson.D{{"$set", bson.D{{"scale", newScale}}}},
		}}

		// Flag the desired scale as protected; SetScale refuses unforced
		// changes while this flag is set and not yet applied.
		cloudSvcDoc := cloudServiceDoc{
			DocID:                 a.globalKey(),
			DesiredScaleProtected: true,
		}
		cloudSvcOp, err := buildCloudServiceOps(a.st, cloudSvcDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops = append(ops, cloudSvcOp...)
		return ops, nil
	}
	if err := a.st.db().Run(buildTxn); err != nil {
		return a.doc.DesiredScale, errors.Errorf("cannot set scale for application %q to %v: %v", a, newScale, onAbort(err, applicationNotAliveErr))
	}
	// Mirror the persisted value on the cached document.
	a.doc.DesiredScale = newScale
	return newScale, nil
}

// SetScale sets the application's desired scale value.
// This is used on CAAS models.
func (a *Application) SetScale(scale int, generation int64, force bool) error {
	if scale < 0 {
		return errors.NotValidf("application scale %d", scale)
	}
	// A missing cloud-service doc is tolerated; the protection/generation
	// checks below only apply when one exists.
	svcInfo, err := a.ServiceInfo()
	if err != nil && !errors.IsNotFound(err) {
		return errors.Trace(err)
	}
	if err == nil {
		logger.Tracef(
			"SetScale DesiredScaleProtected %v, DesiredScale %v -> %v, Generation %v -> %v",
			svcInfo.DesiredScaleProtected(), a.doc.DesiredScale, scale, svcInfo.Generation(), generation,
		)
		// While the desired scale is protected (set via ChangeScale/CLI
		// and not yet applied), only a forced call may change it.
		if svcInfo.DesiredScaleProtected() && !force && scale != a.doc.DesiredScale {
			return errors.Forbiddenf("SetScale(%d) without force while desired scale %d is not applied yet", scale, a.doc.DesiredScale)
		}
		// Reject stale (older-generation) unforced updates.
		if !force && generation < svcInfo.Generation() {
			return errors.Forbiddenf(
				"application generation %d can not be reverted to %d", svcInfo.Generation(), generation,
			)
		}
	}

	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			// Re-read state before retrying the txn.
			if err := a.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
			alive, err := isAlive(a.st, applicationsC, a.doc.DocID)
			if err != nil {
				return nil, errors.Trace(err)
			} else if !alive {
				return nil, applicationNotAliveErr
			}
		}
		// Assert on life, charm URL and unit count so concurrent changes
		// abort the txn and trigger a retry.
		ops := []txn.Op{{
			C:  applicationsC,
			Id: a.doc.DocID,
			Assert: bson.D{
				{"life", Alive},
				{"charmurl", a.doc.CharmURL},
				{"unitcount", a.doc.UnitCount},
			},
			Update: bson.D{{"$set", bson.D{{"scale", scale}}}},
		}}
		cloudSvcDoc := cloudServiceDoc{
			DocID: a.globalKey(),
		}
		if force {
			// scale from cli.
			cloudSvcDoc.DesiredScaleProtected = true
		} else {
			// scale from cluster always has a valid generation (>= current generation).
			cloudSvcDoc.Generation = generation
		}
		cloudSvcOp, err := buildCloudServiceOps(a.st, cloudSvcDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops = append(ops, cloudSvcOp...)
		return ops, nil
	}
	if err := a.st.db().Run(buildTxn); err != nil {
		return errors.Errorf("cannot set scale for application %q to %v: %v", a, scale, onAbort(err, applicationNotAliveErr))
	}
	// Mirror the persisted value on the cached document.
	a.doc.DesiredScale = scale
	return nil
}

// ClearResources sets the application's pending resources to false.
// This is used on CAAS models.
func (a *Application) ClearResources() error {
	// Only a dying/dead application may have its resources flag cleared.
	if a.doc.Life == Alive {
		return errors.Errorf("application %q is alive", a.doc.Name)
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			if err := a.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
			// Already cleared by a concurrent caller; nothing to do.
			if !a.doc.HasResources {
				return nil, jujutxn.ErrNoOperations
			}
		}
		ops := []txn.Op{{
			C:  applicationsC,
			Id: a.doc.DocID,
			Assert: bson.D{
				{"life", bson.M{"$ne": Alive}},
				{"charmurl", a.doc.CharmURL},
				{"unitcount", a.doc.UnitCount},
				{"has-resources", true}},
			Update: bson.D{{"$set", bson.D{{"has-resources", false}}}},
		}}
		logger.Debugf("application %q now has no cluster resources, scheduling cleanup", a.doc.Name)
		// Schedule the application cleanup in the same transaction.
		cleanupOp := newCleanupOp(
			cleanupApplication,
			a.doc.Name,
			false, // force
			false, // destroy storage
		)
		return append(ops, cleanupOp), nil
	}
	if err := a.st.db().Run(buildTxn); err != nil {
		return errors.Errorf("cannot clear cluster resources for application %q: %v", a, onAbort(err, applicationNotAliveErr))
	}
	// Mirror the persisted value on the cached document.
	a.doc.HasResources = false
	return nil
}

// newUnitName returns the next unit name.
2335 func (a *Application) newUnitName() (string, error) { 2336 unitSeq, err := sequence(a.st, a.Tag().String()) 2337 if err != nil { 2338 return "", errors.Trace(err) 2339 } 2340 name := a.doc.Name + "/" + strconv.Itoa(unitSeq) 2341 return name, nil 2342 } 2343 2344 // addUnitOps returns a unique name for a new unit, and a list of txn operations 2345 // necessary to create that unit. The principalName param must be non-empty if 2346 // and only if s is a subordinate application. Only one subordinate of a given 2347 // application will be assigned to a given principal. The asserts param can be used 2348 // to include additional assertions for the application document. This method 2349 // assumes that the application already exists in the db. 2350 func (a *Application) addUnitOps( 2351 principalName string, 2352 args AddUnitParams, 2353 asserts bson.D, 2354 ) (string, []txn.Op, error) { 2355 var cons constraints.Value 2356 if !a.doc.Subordinate { 2357 scons, err := a.Constraints() 2358 if errors.IsNotFound(err) { 2359 return "", nil, errors.NotFoundf("application %q", a.Name()) 2360 } 2361 if err != nil { 2362 return "", nil, errors.Trace(err) 2363 } 2364 cons, err = a.st.ResolveConstraints(scons) 2365 if err != nil { 2366 return "", nil, errors.Trace(err) 2367 } 2368 // If the application is deployed to the controller model and the charm 2369 // has the special juju- prefix to its name, then bypass the machineID 2370 // empty check. 
2371 if args.machineID != "" && a.st.IsController() { 2372 curl, err := charm.ParseURL(*a.doc.CharmURL) 2373 if err != nil { 2374 return "", nil, errors.Trace(err) 2375 } 2376 if !strings.HasPrefix(curl.Name, "juju-") { 2377 return "", nil, errors.NotSupportedf("non-empty machineID") 2378 } 2379 } else if args.machineID != "" { 2380 return "", nil, errors.NotSupportedf("non-empty machineID") 2381 } 2382 } 2383 storageCons, err := a.StorageConstraints() 2384 if err != nil { 2385 return "", nil, errors.Trace(err) 2386 } 2387 uNames, ops, err := a.addUnitOpsWithCons(applicationAddUnitOpsArgs{ 2388 cons: cons, 2389 principalName: principalName, 2390 principalMachineID: args.machineID, 2391 storageCons: storageCons, 2392 attachStorage: args.AttachStorage, 2393 providerId: args.ProviderId, 2394 address: args.Address, 2395 ports: args.Ports, 2396 unitName: args.UnitName, 2397 passwordHash: args.PasswordHash, 2398 }) 2399 if err != nil { 2400 return uNames, ops, errors.Trace(err) 2401 } 2402 // we verify the application is alive 2403 asserts = append(isAliveDoc, asserts...) 2404 ops = append(ops, a.incUnitCountOp(asserts)) 2405 return uNames, ops, nil 2406 } 2407 2408 type applicationAddUnitOpsArgs struct { 2409 principalName string 2410 principalMachineID string 2411 2412 cons constraints.Value 2413 storageCons map[string]StorageConstraints 2414 attachStorage []names.StorageTag 2415 2416 // These optional attributes are relevant to CAAS models. 2417 providerId *string 2418 address *string 2419 ports *[]string 2420 unitName *string 2421 passwordHash *string 2422 } 2423 2424 // addUnitOpsWithCons is a helper method for returning addUnitOps. 
func (a *Application) addUnitOpsWithCons(args applicationAddUnitOpsArgs) (string, []txn.Op, error) {
	// A subordinate must have a principal, a principal must not.
	if a.doc.Subordinate && args.principalName == "" {
		return "", nil, errors.New("application is a subordinate")
	} else if !a.doc.Subordinate && args.principalName != "" {
		return "", nil, errors.New("application is not a subordinate")
	}
	// Use the caller-supplied unit name if given (CAAS stateful units),
	// otherwise allocate the next name from the sequence.
	var name string
	if args.unitName != nil {
		name = *args.unitName
	} else {
		newName, err := a.newUnitName()
		if err != nil {
			return "", nil, errors.Trace(err)
		}
		name = newName
	}
	unitTag := names.NewUnitTag(name)

	appCharm, _, err := a.Charm()
	if err != nil {
		return "", nil, errors.Trace(err)
	}
	// Build the storage instance/attachment ops for the new unit first,
	// so the attachment count can be recorded on the unit doc.
	storageOps, numStorageAttachments, err := a.addUnitStorageOps(
		args, unitTag, appCharm,
	)
	if err != nil {
		return "", nil, errors.Trace(err)
	}

	docID := a.st.docID(name)
	globalKey := unitGlobalKey(name)
	agentGlobalKey := unitAgentGlobalKey(name)
	// The unit inherits the application's platform as its base.
	platform := a.CharmOrigin().Platform
	base := Base{OS: platform.OS, Channel: platform.Channel}.Normalise()
	udoc := &unitDoc{
		DocID:                  docID,
		Name:                   name,
		Application:            a.doc.Name,
		Base:                   base,
		Life:                   Alive,
		Principal:              args.principalName,
		MachineId:              args.principalMachineID,
		StorageAttachmentCount: numStorageAttachments,
	}
	if args.passwordHash != nil {
		udoc.PasswordHash = *args.passwordHash
	}
	now := a.st.clock().Now()
	// New units start out allocating, waiting for their agent.
	agentStatusDoc := statusDoc{
		Status:  status.Allocating,
		Updated: now.UnixNano(),
	}

	m, err := a.st.Model()
	if err != nil {
		return "", nil, errors.Trace(err)
	}
	unitStatusDoc := &statusDoc{
		Status:     status.Waiting,
		StatusInfo: status.MessageInstallingAgent,
		Updated:    now.UnixNano(),
	}
	meterStatus := &meterStatusDoc{Code: MeterNotSet.String()}

	workloadVersionDoc := &statusDoc{
		Status:  status.Unknown,
		Updated: now.UnixNano(),
	}
	// IAAS units first wait for a machine; CAAS units go straight to
	// installing the agent.
	if m.Type() != ModelTypeCAAS {
		unitStatusDoc.StatusInfo = status.MessageWaitForMachine
	}
	// For CAAS, record any provider-supplied container details.
	var containerDoc *cloudContainerDoc
	if m.Type() == ModelTypeCAAS {
		if args.providerId != nil || args.address != nil || args.ports != nil {
			containerDoc = &cloudContainerDoc{
				Id: globalKey,
			}
			if args.providerId != nil {
				containerDoc.ProviderId = *args.providerId
			}
			if args.address != nil {
				networkAddr := network.NewSpaceAddress(*args.address, network.WithScope(network.ScopeMachineLocal))
				addr := fromNetworkAddress(networkAddr, network.OriginProvider)
				containerDoc.Address = &addr
			}
			if args.ports != nil {
				containerDoc.Ports = *args.ports
			}
		}
	}

	ops, err := addUnitOps(a.st, addUnitOpsArgs{
		unitDoc:            udoc,
		containerDoc:       containerDoc,
		agentStatusDoc:     agentStatusDoc,
		workloadStatusDoc:  unitStatusDoc,
		workloadVersionDoc: workloadVersionDoc,
		meterStatusDoc:     meterStatus,
	})
	if err != nil {
		return "", nil, errors.Trace(err)
	}

	ops = append(ops, storageOps...)

	if a.doc.Subordinate {
		// Register this unit as a subordinate of its principal; the
		// regex assert ensures at most one subordinate of this
		// application per principal.
		ops = append(ops, txn.Op{
			C:  unitsC,
			Id: a.st.docID(args.principalName),
			Assert: append(isAliveDoc, bson.DocElem{
				"subordinates", bson.D{{"$not", bson.RegEx{Pattern: "^" + a.doc.Name + "/"}}},
			}),
			Update: bson.D{{"$addToSet", bson.D{{"subordinates", name}}}},
		})
	} else {
		ops = append(ops, createConstraintsOp(agentGlobalKey, args.cons))
	}

	// At the last moment we still have the statusDocs in scope, set the initial
	// history entries. This is risky, and may lead to extra entries, but that's
	// an intrinsic problem with mixing txn and non-txn ops -- we can't sync
	// them cleanly.
	_, _ = probablyUpdateStatusHistory(a.st.db(), globalKey, *unitStatusDoc)
	_, _ = probablyUpdateStatusHistory(a.st.db(), globalWorkloadVersionKey(name), *workloadVersionDoc)
	_, _ = probablyUpdateStatusHistory(a.st.db(), agentGlobalKey, agentStatusDoc)
	return name, ops, nil
}

// addUnitStorageOps returns the txn ops to create the new unit's storage
// instances and attachments, plus the resulting attachment count.
func (a *Application) addUnitStorageOps(
	args applicationAddUnitOpsArgs,
	unitTag names.UnitTag,
	charm *Charm,
) ([]txn.Op, int, error) {
	sb, err := NewStorageBackend(a.st)
	if err != nil {
		return nil, -1, errors.Trace(err)
	}

	// Reduce the count of new storage created for each existing storage
	// being attached.
	var storageCons map[string]StorageConstraints
	for _, tag := range args.attachStorage {
		storageName, err := names.StorageName(tag.Id())
		if err != nil {
			return nil, -1, errors.Trace(err)
		}
		if cons, ok := args.storageCons[storageName]; ok && cons.Count > 0 {
			if storageCons == nil {
				// We must not modify the contents of the original
				// args.storageCons map, as it comes from the
				// user. Make a copy and modify that.
				storageCons = make(map[string]StorageConstraints)
				for name, cons := range args.storageCons {
					storageCons[name] = cons
				}
				args.storageCons = storageCons
			}
			cons.Count--
			storageCons[storageName] = cons
		}
	}

	// Add storage instances/attachments for the unit. If the
	// application is subordinate, we'll add the machine storage
	// if the principal is assigned to a machine. Otherwise, we
	// will add the subordinate's storage along with the principal's
	// when the principal is assigned to a machine.
	var machineAssignable machineAssignable
	if a.doc.Subordinate {
		pu, err := a.st.Unit(args.principalName)
		if err != nil {
			return nil, -1, errors.Trace(err)
		}
		machineAssignable = pu
	}
	platform := a.CharmOrigin().Platform
	storageOps, storageTags, numStorageAttachments, err := createStorageOps(
		sb,
		unitTag,
		charm.Meta(),
		args.storageCons,
		platform.OS,
		machineAssignable,
	)
	if err != nil {
		return nil, -1, errors.Trace(err)
	}
	// Attach the pre-existing storage instances requested by the caller.
	for _, storageTag := range args.attachStorage {
		si, err := sb.storageInstance(storageTag)
		if err != nil {
			return nil, -1, errors.Annotatef(
				err, "attaching %s",
				names.ReadableString(storageTag),
			)
		}
		ops, err := sb.attachStorageOps(
			si,
			unitTag,
			platform.OS,
			charm,
			machineAssignable,
		)
		if err != nil {
			return nil, -1, errors.Trace(err)
		}
		storageOps = append(storageOps, ops...)
		numStorageAttachments++
		storageTags[si.StorageName()] = append(storageTags[si.StorageName()], storageTag)
	}
	// Validate per-store counts against the charm's limits and add the
	// refcount increments.
	for name, tags := range storageTags {
		count := len(tags)
		charmStorage := charm.Meta().Storage[name]
		if err := validateCharmStorageCountChange(charmStorage, 0, count); err != nil {
			return nil, -1, errors.Trace(err)
		}
		incRefOp, err := increfEntityStorageOp(a.st, unitTag, name, count)
		if err != nil {
			return nil, -1, errors.Trace(err)
		}
		storageOps = append(storageOps, incRefOp)
	}
	return storageOps, numStorageAttachments, nil
}

// applicationOffersRefCountKey returns a key for refcounting offers
// for the specified application. Each time an offer is created, the
// refcount is incremented, and the opposite happens on removal.
2652 func applicationOffersRefCountKey(appName string) string { 2653 return fmt.Sprintf("offer#%s", appName) 2654 } 2655 2656 // incApplicationOffersRefOp returns a txn.Op that increments the reference 2657 // count for an application offer. 2658 func incApplicationOffersRefOp(mb modelBackend, appName string) (txn.Op, error) { 2659 refcounts, closer := mb.db().GetCollection(refcountsC) 2660 defer closer() 2661 offerRefCountKey := applicationOffersRefCountKey(appName) 2662 incRefOp, err := nsRefcounts.CreateOrIncRefOp(refcounts, offerRefCountKey, 1) 2663 return incRefOp, errors.Trace(err) 2664 } 2665 2666 // newApplicationOffersRefOp returns a txn.Op that creates a new reference 2667 // count for an application offer, starting at the count supplied. Used in 2668 // model migration, where offers are created in bulk. 2669 func newApplicationOffersRefOp(mb modelBackend, appName string, startCount int) (txn.Op, error) { 2670 refcounts, closer := mb.db().GetCollection(refcountsC) 2671 defer closer() 2672 offerRefCountKey := applicationOffersRefCountKey(appName) 2673 incRefOp, err := nsRefcounts.CreateOrIncRefOp(refcounts, offerRefCountKey, startCount) 2674 return incRefOp, errors.Trace(err) 2675 } 2676 2677 // countApplicationOffersRefOp returns the number of offers for an application, 2678 // along with a txn.Op that ensures that that does not change. 2679 func countApplicationOffersRefOp(mb modelBackend, appName string) (txn.Op, int, error) { 2680 refcounts, closer := mb.db().GetCollection(refcountsC) 2681 defer closer() 2682 key := applicationOffersRefCountKey(appName) 2683 return nsRefcounts.CurrentOp(refcounts, key) 2684 } 2685 2686 // decApplicationOffersRefOp returns a txn.Op that decrements the reference 2687 // count for an application offer. 
2688 func decApplicationOffersRefOp(mb modelBackend, appName string) (txn.Op, error) { 2689 refcounts, closer := mb.db().GetCollection(refcountsC) 2690 defer closer() 2691 offerRefCountKey := applicationOffersRefCountKey(appName) 2692 decRefOp, _, err := nsRefcounts.DyingDecRefOp(refcounts, offerRefCountKey) 2693 if err != nil { 2694 return txn.Op{}, errors.Trace(err) 2695 } 2696 return decRefOp, nil 2697 } 2698 2699 // incUnitCountOp returns the operation to increment the application's unit count. 2700 func (a *Application) incUnitCountOp(asserts bson.D) txn.Op { 2701 op := txn.Op{ 2702 C: applicationsC, 2703 Id: a.doc.DocID, 2704 Update: bson.D{{"$inc", bson.D{{"unitcount", 1}}}}, 2705 } 2706 if len(asserts) > 0 { 2707 op.Assert = asserts 2708 } 2709 return op 2710 } 2711 2712 // AddUnitParams contains parameters for the Application.AddUnit method. 2713 type AddUnitParams struct { 2714 // AttachStorage identifies storage instances to attach to the unit. 2715 AttachStorage []names.StorageTag 2716 2717 // These attributes are relevant to CAAS models. 2718 2719 // ProviderId identifies the unit for a given provider. 2720 ProviderId *string 2721 2722 // Address is the container address. 2723 Address *string 2724 2725 // Ports are the open ports on the container. 2726 Ports *[]string 2727 2728 // UnitName is for CAAS models when creating stateful units. 2729 UnitName *string 2730 2731 // machineID is only passed in if the unit being created is 2732 // a subordinate and refers to the machine that is hosting the principal. 2733 machineID string 2734 2735 // PasswordHash is only passed for CAAS sidecar units on creation. 2736 PasswordHash *string 2737 } 2738 2739 // AddUnit adds a new principal unit to the application. 
func (a *Application) AddUnit(args AddUnitParams) (unit *Unit, err error) {
	defer errors.DeferredAnnotatef(&err, "cannot add unit to application %q", a)
	name, ops, err := a.addUnitOps("", args, nil)
	if err != nil {
		return nil, err
	}

	if err := a.st.db().RunTransaction(ops); err == txn.ErrAborted {
		// On abort, report a clearer error if the application has died;
		// otherwise we cannot tell which assertion failed.
		if alive, err := isAlive(a.st, applicationsC, a.doc.DocID); err != nil {
			return nil, err
		} else if !alive {
			return nil, applicationNotAliveErr
		}
		return nil, errors.New("inconsistent state")
	} else if err != nil {
		return nil, err
	}
	return a.st.Unit(name)
}

// UpsertCAASUnitParams is passed to UpsertCAASUnit to describe how to create or how to find and
// update an existing unit for sidecar CAAS application.
type UpsertCAASUnitParams struct {
	AddUnitParams

	// OrderedScale is always true. It represents a mapping of OrderedId to Unit ID.
	OrderedScale bool
	// OrderedId is the stable ordinal index of the "pod".
	OrderedId int

	// ObservedAttachedVolumeIDs is the filesystem attachments observed to be attached by the infrastructure,
	// used to map existing attachments.
	ObservedAttachedVolumeIDs []string
}

// UpsertCAASUnit creates the named unit if it does not exist, or updates the
// existing unit's provider details, reattaching any storage that the
// infrastructure reports as attached to the pod.
func (a *Application) UpsertCAASUnit(args UpsertCAASUnitParams) (*Unit, error) {
	// All four parameters are required for sidecar CAAS upserts.
	if args.PasswordHash == nil {
		return nil, errors.NotValidf("password hash")
	}
	if args.ProviderId == nil {
		return nil, errors.NotValidf("provider id")
	}
	if !args.OrderedScale {
		return nil, errors.NewNotImplemented(nil, "upserting CAAS units not supported without ordered unit IDs")
	}
	if args.UnitName == nil {
		return nil, errors.NotValidf("nil unit name")
	}

	sb, err := NewStorageBackend(a.st)
	if err != nil {
		return nil, errors.Trace(err)
	}

	var unit *Unit
	err = a.st.db().Run(func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			err := a.Refresh()
			if err != nil {
				return nil, errors.Trace(err)
			}
		}

		// Look up (or on retry, refresh) the existing unit; a NotFound
		// result means we are inserting rather than updating.
		if args.UnitName != nil {
			var err error
			if unit == nil {
				unit, err = a.st.Unit(*args.UnitName)
			} else {
				err = unit.Refresh()
			}
			if errors.Is(err, errors.NotFound) {
				unit = nil
			} else if err != nil {
				return nil, errors.Trace(err)
			}
		}

		// Try to reattach the storage that k8s has observed attached to this pod.
		for _, volumeId := range args.ObservedAttachedVolumeIDs {
			// Volumes that juju does not know about are skipped.
			volume, err := sb.volume(bson.D{{"info.volumeid", volumeId}}, "")
			if errors.Is(err, errors.NotFound) {
				continue
			} else if err != nil {
				return nil, errors.Trace(err)
			}

			// Volumes not assigned to a storage instance are skipped.
			volumeStorageId, err := volume.StorageInstance()
			if errors.Is(err, errors.NotAssigned) {
				continue
			} else if err != nil {
				return nil, errors.Trace(err)
			}

			args.AddUnitParams.AttachStorage = append(args.AddUnitParams.AttachStorage, volumeStorageId)
		}

		if unit == nil {
			return a.insertCAASUnitOps(args)
		}

		if unit.Life() == Dead {
			return nil, errors.AlreadyExistsf("dead unit %q", unit.Tag().Id())
		}

		// Update the existing unit's provider-reported details.
		updateOps, err := unit.UpdateOperation(UnitUpdateProperties{
			ProviderId: args.ProviderId,
			Address:    args.Address,
			Ports:      args.Ports,
		}).Build(attempt)
		if err != nil {
			return nil, errors.Trace(err)
		}

		var ops []txn.Op
		// NOTE(review): PasswordHash is validated non-nil above, so the
		// else branch below appears unreachable — confirm before removing.
		if args.PasswordHash != nil {
			ops = append(ops, unit.setPasswordHashOps(*args.PasswordHash)...) // setPasswordHashOps asserts notDead
		} else {
			ops = append(ops, txn.Op{
				C:      unitsC,
				Id:     unit.doc.DocID,
				Assert: notDeadDoc,
			})
		}
		ops = append(ops, updateOps...)
		return ops, nil
	})
	if err != nil {
		return nil, err
	}
	// Return a freshly loaded/refreshed unit reflecting the txn's result.
	if unit == nil {
		unit, err = a.st.Unit(*args.UnitName)
		if err != nil {
			return nil, err
		}
	} else {
		err = unit.Refresh()
		if err != nil {
			return nil, err
		}
	}
	return unit, nil
}

// insertCAASUnitOps returns the txn ops to create a new CAAS unit for the
// pod ordinal in args, asserting that scale and provisioning state are
// unchanged when the txn runs.
func (a *Application) insertCAASUnitOps(args UpsertCAASUnitParams) ([]txn.Op, error) {
	if args.UnitName == nil {
		return nil, errors.NotValidf("nil unit name")
	}

	// Refuse to create a unit whose ordinal is outside the desired scale
	// (or the scale target while a scaling operation is in progress).
	if ps := a.ProvisioningState(); args.OrderedId >= a.GetScale() ||
		(ps != nil && ps.Scaling && args.OrderedId >= ps.ScaleTarget) {
		return nil, errors.NotAssignedf("unrequired unit %s is", *args.UnitName)
	}

	_, addOps, err := a.addUnitOps("", args.AddUnitParams, nil)
	if err != nil {
		return nil, errors.Trace(err)
	}

	ops := []txn.Op{{
		C:  applicationsC,
		Id: a.doc.DocID,
		Assert: bson.D{
			{"life", Alive},
			{"scale", a.GetScale()},
			{"provisioning-state", a.ProvisioningState()},
		},
	}}
	ops = append(ops, addOps...)
	return ops, nil
}

// removeUnitOps returns the operations necessary to remove the supplied unit,
// assuming the supplied asserts apply to the unit document.
// When 'force' is set, this call will always return some needed operations
// and accumulate all operational errors encountered in the operation.
// If the 'force' is not set, any error will be fatal and no operations will be returned.
func (a *Application) removeUnitOps(u *Unit, asserts bson.D, op *ForcedOperation, destroyStorage bool) ([]txn.Op, error) {
	// Gather ops from each subsystem first; op.FatalError only aborts when
	// 'force' is not set, otherwise errors accumulate on op.
	hostOps, err := u.destroyHostOps(a, op)
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	portsOps, err := removePortsForUnitOps(a.st, u)
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	appPortsOps, err := removeApplicationPortsForUnitOps(a.st, u)
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	resOps, err := removeUnitResourcesOps(a.st, u.doc.Name)
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	secretScopedPermissionsOps, err := a.st.removeScopedSecretPermissionOps(u.Tag())
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	secretConsumerPermissionsOps, err := a.st.removeConsumerSecretPermissionOps(u.Tag())
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	secretOwnerLabelOps, err := a.st.removeOwnerSecretLabelsOps(u.Tag())
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	secretConsumerLabelOps, err := a.st.removeConsumerSecretLabelsOps(u.Tag())
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}

	// Assert the fields we observed while building the ops so the removal
	// aborts if the unit changed underneath us.
	observedFieldsMatch := bson.D{
		{"charmurl", u.doc.CharmURL},
		{"machineid", u.doc.MachineId},
	}
	ops := []txn.Op{
		{
			C:      unitsC,
			Id:     u.doc.DocID,
			Assert: append(observedFieldsMatch, asserts...),
			Remove: true,
		},
		removeMeterStatusOp(a.st, u.globalMeterStatusKey()),
		removeStatusOp(a.st, u.globalAgentKey()),
		removeStatusOp(a.st, u.globalKey()),
		removeStatusOp(a.st, u.globalWorkloadVersionKey()),
		removeUnitStateOp(a.st, u.globalKey()),
		removeStatusOp(a.st, u.globalCloudContainerKey()),
		removeConstraintsOp(u.globalAgentKey()),
		annotationRemoveOp(a.st, u.globalKey()),
		newCleanupOp(cleanupRemovedUnit, u.doc.Name, op.Force),
	}
	ops = append(ops, portsOps...)
	ops = append(ops, appPortsOps...)
	ops = append(ops, resOps...)
	ops = append(ops, hostOps...)
	ops = append(ops, secretScopedPermissionsOps...)
	ops = append(ops, secretConsumerPermissionsOps...)
	ops = append(ops, secretOwnerLabelOps...)
	ops = append(ops, secretConsumerLabelOps...)

	m, err := a.st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if m.Type() == ModelTypeCAAS {
		ops = append(ops, u.removeCloudContainerOps()...)
		ops = append(ops, newCleanupOp(cleanupDyingUnitResources, u.doc.Name, op.Force, op.MaxWait))
	}
	branchOps, err := unassignUnitFromBranchOp(u.doc.Name, a.doc.Name, m)
	if err != nil {
		if !op.Force {
			return nil, errors.Trace(err)
		}
		op.AddError(err)
	}
	ops = append(ops, branchOps...)

	sb, err := NewStorageBackend(a.st)
	if err != nil {
		return nil, errors.Trace(err)
	}
	storageInstanceOps, err := removeStorageInstancesOps(sb, u.Tag(), op.Force)
	if op.FatalError(err) {
		return nil, errors.Trace(err)
	}
	ops = append(ops, storageInstanceOps...)

	if u.doc.CharmURL != nil {
		// If the unit has a different URL to the application, allow any final
		// cleanup to happen; otherwise we just do it when the app itself is removed.
		maybeDoFinal := *u.doc.CharmURL != *a.doc.CharmURL

		// When 'force' is set, this call will return both needed operations
		// as well as all operational errors encountered.
		// If the 'force' is not set, any error will be fatal and no operations will be returned.
		decOps, err := appCharmDecRefOps(a.st, a.doc.Name, u.doc.CharmURL, maybeDoFinal, op)
		if errors.IsNotFound(err) {
			// The charm ref doc vanished; signal the caller to refresh and retry.
			return nil, errRefresh
		} else if op.FatalError(err) {
			return nil, errors.Trace(err)
		}
		ops = append(ops, decOps...)
	}
	// Decrement the application's unit count, asserting life is unchanged
	// and the count is still positive.
	appOp := txn.Op{
		C:      applicationsC,
		Id:     a.doc.DocID,
		Assert: bson.D{{"life", a.doc.Life}, {"unitcount", bson.D{{"$gt", 0}}}},
		Update: bson.D{{"$inc", bson.D{{"unitcount", -1}}}},
	}
	ops = append(ops, appOp)
	if a.doc.Life == Dying {
		// Create a cleanup for this application as this might be the last reference.
		cleanupOp := newCleanupOp(
			cleanupApplication,
			a.doc.Name,
			destroyStorage,
			op.Force,
		)
		ops = append(ops, cleanupOp)
	}
	return ops, nil
}

// removeUnitResourcesOps returns the ops that remove the resource docs
// belonging to the given unit.
func removeUnitResourcesOps(st *State, unitID string) ([]txn.Op, error) {
	resources := st.resources()
	ops, err := resources.removeUnitResourcesOps(unitID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return ops, nil
}

// unassignUnitFromBranchOp returns ops unassigning the unit from whichever
// branch currently tracks it, or nil if it is tracked by none.
func unassignUnitFromBranchOp(unitName, appName string, m *Model) ([]txn.Op, error) {
	branch, err := m.unitBranch(unitName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if branch == nil {
		// Nothing to see here, move along.
		return nil, nil
	}
	return branch.unassignUnitOps(unitName, appName), nil
}

// AllUnits returns all units of the application.
func (a *Application) AllUnits() (units []*Unit, err error) {
	return allUnits(a.st, a.doc.Name)
}

// allUnits returns all units belonging to the named application.
func allUnits(st *State, application string) (units []*Unit, err error) {
	unitsCollection, closer := st.db().GetCollection(unitsC)
	defer closer()

	docs := []unitDoc{}
	err = unitsCollection.Find(bson.D{{"application", application}}).All(&docs)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get all units from application %q", application)
	}
	m, err := st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for i := range docs {
		units = append(units, newUnit(st, m.Type(), &docs[i]))
	}
	return units, nil
}

// Relations returns a Relation for every relation the application is in.
func (a *Application) Relations() (relations []*Relation, err error) {
	return matchingRelations(a.st, a.doc.Name)
}

// matchingRelations returns all relations matching the application(s)/endpoint(s) provided
// There must be 1 or 2 supplied names, of the form <application>[:<relation>]
func matchingRelations(st *State, names ...string) (relations []*Relation, err error) {
	defer errors.DeferredAnnotatef(&err, "can't get relations matching %q", strings.Join(names, " "))
	relationsCollection, closer := st.db().GetCollection(relationsC)
	defer closer()

	// Build one condition per name; all conditions must match ($and).
	var conditions []bson.D
	for _, name := range names {
		appName, relName, err := splitEndpointName(name)
		if err != nil {
			return nil, err
		}
		if relName == "" {
			// Bare application name: match any endpoint of that application.
			conditions = append(conditions, bson.D{{"endpoints.applicationname", appName}})
		} else {
			// application:relation — the same endpoint element must match
			// both fields, hence $elemMatch.
			conditions = append(conditions, bson.D{{"endpoints", bson.D{{"$elemMatch", bson.D{
				{"applicationname", appName},
				{"relation.name", relName},
			}}}}})
		}
	}

	docs := []relationDoc{}
	err = relationsCollection.Find(bson.D{{
		"$and", conditions,
	}}).All(&docs)

	if err != nil {
		return nil, err
	}
	for _, v := range docs {
		relations = append(relations, newRelation(st, &v))
	}
	return relations, nil
}

// CharmConfig returns the raw user configuration for the application's charm.
func (a *Application) CharmConfig(branchName string) (charm.Settings, error) {
	if a.doc.CharmURL == nil {
		return nil, fmt.Errorf("application charm not set")
	}

	s, err := charmSettingsWithDefaults(a.st, a.doc.CharmURL, a.Name(), branchName)
	return s, errors.Annotatef(err, "charm config for application %q", a.doc.Name)
}

// charmSettingsWithDefaults returns the charm's default settings overlaid
// with the stored settings for the given branch.
func charmSettingsWithDefaults(st *State, cURL *string, appName, branchName string) (charm.Settings, error) {
	cfg, err := branchCharmSettings(st, cURL, appName, branchName)
	if err != nil {
		return nil, errors.Trace(err)
	}

	ch, err := st.Charm(*cURL)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Stored values take precedence over charm defaults.
	result := ch.Config().DefaultSettings()
	for name, value := range cfg.Map() {
		result[name] = value
	}
	return result, nil
}

// branchCharmSettings reads the application's charm settings and, for a
// non-master branch, overlays that branch's pending changes.
func branchCharmSettings(st *State, cURL *string, appName, branchName string) (*Settings, error) {
	key := applicationCharmConfigKey(appName, cURL)
	cfg, err := readSettings(st.db(), settingsC, key)
	if err != nil {
		return nil, errors.Trace(err)
	}

	if branchName != model.GenerationMaster {
		branch, err := st.Branch(branchName)
		if err != nil {
			return nil, errors.Trace(err)
		}
		cfg.applyChanges(branch.Config()[appName])
	}

	return cfg, nil
}

// UpdateCharmConfig changes a application's charm config settings. Values set
// to nil will be deleted; unknown and invalid values will return an error.
3178 func (a *Application) UpdateCharmConfig(branchName string, changes charm.Settings) error { 3179 ch, _, err := a.Charm() 3180 if err != nil { 3181 return errors.Trace(err) 3182 } 3183 changes, err = ch.Config().ValidateSettings(changes) 3184 if err != nil { 3185 return errors.Trace(err) 3186 } 3187 3188 // TODO(fwereade) state.Settings is itself really problematic in just 3189 // about every use case. This needs to be resolved some time; but at 3190 // least the settings docs are keyed by charm url as well as application 3191 // name, so the actual impact of a race is non-threatening. 3192 current, err := readSettings(a.st.db(), settingsC, a.charmConfigKey()) 3193 if err != nil { 3194 return errors.Annotatef(err, "charm config for application %q", a.doc.Name) 3195 } 3196 3197 if branchName == model.GenerationMaster { 3198 return errors.Trace(a.updateMasterConfig(current, changes)) 3199 } 3200 return errors.Trace(a.updateBranchConfig(branchName, current, changes)) 3201 } 3202 3203 // TODO (manadart 2019-04-03): Implement master config changes as 3204 // instantly committed branches. 3205 func (a *Application) updateMasterConfig(current *Settings, validChanges charm.Settings) error { 3206 for name, value := range validChanges { 3207 if value == nil { 3208 current.Delete(name) 3209 } else { 3210 current.Set(name, value) 3211 } 3212 } 3213 _, err := current.Write() 3214 return errors.Trace(err) 3215 } 3216 3217 // updateBranchConfig compares the incoming charm settings to the current 3218 // settings to generate a collection of changes, which is used to update the 3219 // branch with the input name. 
func (a *Application) updateBranchConfig(branchName string, current *Settings, validChanges charm.Settings) error {
	branch, err := a.st.Branch(branchName)
	if err != nil {
		return errors.Trace(err)
	}

	return errors.Trace(branch.UpdateCharmConfig(a.Name(), current, validChanges))
}

// ApplicationConfig returns the configuration for the application itself.
func (a *Application) ApplicationConfig() (config.ConfigAttributes, error) {
	cfg, err := readSettings(a.st.db(), settingsC, a.applicationConfigKey())
	if err != nil {
		// Missing settings doc just means "no config yet" — not an error.
		if errors.IsNotFound(err) {
			return config.ConfigAttributes{}, nil
		}
		return nil, errors.Annotatef(err, "application config for application %q", a.doc.Name)
	}

	if len(cfg.Keys()) == 0 {
		return config.ConfigAttributes{}, nil
	}
	return cfg.Map(), nil
}

// UpdateApplicationConfig changes an application's config settings.
// Unknown and invalid values will return an error.
func (a *Application) UpdateApplicationConfig(
	changes config.ConfigAttributes,
	reset []string,
	schema environschema.Fields,
	defaults schema.Defaults,
) error {
	node, err := readSettings(a.st.db(), settingsC, a.applicationConfigKey())
	if errors.IsNotFound(err) {
		return errors.Errorf("cannot update application config since no config exists for application %v", a.doc.Name)
	} else if err != nil {
		return errors.Annotatef(err, "application config for application %q", a.doc.Name)
	}
	// Apply changes, but reset wins over change for the same key.
	resetKeys := set.NewStrings(reset...)
	for name, value := range changes {
		if resetKeys.Contains(name) {
			continue
		}
		node.Set(name, value)
	}
	for _, name := range reset {
		node.Delete(name)
	}
	// Validate the full resulting config (including defaults) before writing.
	newConfig, err := config.NewConfig(node.Map(), schema, defaults)
	if err != nil {
		return errors.Trace(err)
	}
	if err := newConfig.Validate(); err != nil {
		return errors.Trace(err)
	}
	// Update node so it gets coerced values with correct types.
	coerced := newConfig.Attributes()
	for _, key := range node.Keys() {
		node.Set(key, coerced[key])
	}
	_, err = node.Write()
	return err
}

// LeaderSettings returns a application's leader settings. If nothing has been set
// yet, it will return an empty map; this is not an error.
func (a *Application) LeaderSettings() (map[string]string, error) {
	// There's no compelling reason to have these methods on Application -- and
	// thus require an extra db read to access them -- but it stops the State
	// type getting even more cluttered.

	doc, err := readSettingsDoc(a.st.db(), settingsC, leadershipSettingsKey(a.doc.Name))
	if errors.IsNotFound(err) {
		return nil, errors.NotFoundf("application %q", a.doc.Name)
	} else if err != nil {
		return nil, errors.Annotatef(err, "application %q", a.doc.Name)
	}
	result := make(map[string]string)
	for escapedKey, interfaceValue := range doc.Settings {
		key := mgoutils.UnescapeKey(escapedKey)
		if value, _ := interfaceValue.(string); value != "" {
			// Empty strings are technically bad data -- when set, they clear.
			result[key] = value
		} else {
			// Some bad data isn't reason enough to obscure the good data.
			logger.Warningf("unexpected leader settings value for %s: %#v", key, interfaceValue)
		}
	}
	return result, nil
}

// UpdateLeaderSettings updates the application's leader settings with the supplied
// values, but will fail (with a suitable error) if the supplied Token loses
// validity. Empty values in the supplied map will be cleared in the database.
func (a *Application) UpdateLeaderSettings(token leadership.Token, updates map[string]string) error {
	// There's no compelling reason to have these methods on Application -- and
	// thus require an extra db read to access them -- but it stops the State
	// type getting even more cluttered.
	key := leadershipSettingsKey(a.doc.Name)
	converted := make(map[string]interface{}, len(updates))
	for k, v := range updates {
		converted[k] = v
	}

	modelOp := newUpdateLeaderSettingsOperation(a.st.db(), token, key, converted)
	err := a.st.ApplyOperation(modelOp)
	if errors.IsNotFound(err) {
		return errors.NotFoundf("application %q", a.doc.Name)
	} else if err != nil {
		return errors.Annotatef(err, "application %q", a.doc.Name)
	}
	return nil
}

// ErrSubordinateConstraints is returned when constraint operations are
// attempted on a subordinate application, which has none of its own.
var ErrSubordinateConstraints = stderrors.New("constraints do not apply to subordinate applications")

// Constraints returns the current application constraints.
func (a *Application) Constraints() (constraints.Value, error) {
	if a.doc.Subordinate {
		return constraints.Value{}, ErrSubordinateConstraints
	}
	return readConstraints(a.st, a.globalKey())
}

// SetConstraints replaces the current application constraints.
func (a *Application) SetConstraints(cons constraints.Value) (err error) {
	unsupported, err := a.st.validateConstraints(cons)
	if len(unsupported) > 0 {
		// Unsupported constraints are logged, not fatal.
		logger.Warningf(
			"setting constraints on application %q: unsupported constraints: %v", a.Name(), strings.Join(unsupported, ","))
	} else if err != nil {
		return err
	}

	if a.doc.Subordinate {
		return ErrSubordinateConstraints
	}

	// If the architecture has already been set, do not allow the application
	// architecture to change.
	//
	// If the constraints returns a not found error, we don't actually care,
	// this implies that it's never been set and we want to just take all the
	// valid constraints.
	if current, consErr := a.Constraints(); !errors.IsNotFound(consErr) {
		if consErr != nil {
			return errors.Annotate(consErr, "unable to read constraints")
		}
		// If the incoming arch has a value we only care about that. If the
		// value is empty we can assume that we want the existing current value
		// that is set or not.
		if cons.Arch != nil && *cons.Arch != "" {
			if (current.Arch == nil || *current.Arch == "") && *cons.Arch != arch.DefaultArchitecture {
				return errors.NotSupportedf("changing architecture")
			} else if current.Arch != nil && *current.Arch != "" && *current.Arch != *cons.Arch {
				return errors.NotSupportedf("changing architecture (%s)", *current.Arch)
			}
		}
	}

	defer errors.DeferredAnnotatef(&err, "cannot set constraints")
	if a.doc.Life != Alive {
		return applicationNotAliveErr
	}

	// Assert liveness in the txn so we don't race with the app dying.
	ops := []txn.Op{{
		C:      applicationsC,
		Id:     a.doc.DocID,
		Assert: isAliveDoc,
	}}
	ops = append(ops, setConstraintsOp(a.globalKey(), cons))
	return onAbort(a.st.db().RunTransaction(ops), applicationNotAliveErr)
}

// assertApplicationAliveOp returns a txn op asserting the application
// document is still alive; it performs no mutation.
func assertApplicationAliveOp(docID string) txn.Op {
	return txn.Op{
		C:      applicationsC,
		Id:     docID,
		Assert: isAliveDoc,
	}
}

// OpenedPortRanges returns a ApplicationPortRanges object that can be used to query
// and/or mutate the port ranges opened by the embedded k8s application.
func (a *Application) OpenedPortRanges() (ApplicationPortRanges, error) {
	apr, err := getApplicationPortRanges(a.st, a.Name())
	if err != nil {
		return nil, errors.Trace(err)
	}
	return apr, nil
}

// EndpointBindings returns the mapping for each endpoint name and the space
// ID it is bound to (or empty if unspecified). When no bindings are stored
// for the application, defaults are returned.
func (a *Application) EndpointBindings() (*Bindings, error) {
	// We don't need the TxnRevno below.
	bindings, _, err := readEndpointBindings(a.st, a.globalKey())
	if err != nil && !errors.IsNotFound(err) {
		return nil, errors.Trace(err)
	}
	if bindings == nil {
		bindings, err = a.defaultEndpointBindings()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return &Bindings{st: a.st, bindingsMap: bindings}, nil
}

// defaultEndpointBindings returns a map with each endpoint from the current
// charm metadata bound to an empty space. If no charm URL is set yet, it
// returns an empty map.
func (a *Application) defaultEndpointBindings() (map[string]string, error) {
	if a.doc.CharmURL == nil {
		return map[string]string{}, nil
	}

	appCharm, _, err := a.Charm()
	if err != nil {
		return nil, errors.Trace(err)
	}

	return DefaultEndpointBindingsForCharm(a.st, appCharm.Meta())
}

// MetricCredentials returns any metric credentials associated with this application.
func (a *Application) MetricCredentials() []byte {
	return a.doc.MetricCredentials
}

// SetMetricCredentials updates the metric credentials associated with this application.
3453 func (a *Application) SetMetricCredentials(b []byte) error { 3454 buildTxn := func(attempt int) ([]txn.Op, error) { 3455 if attempt > 0 { 3456 alive, err := isAlive(a.st, applicationsC, a.doc.DocID) 3457 if err != nil { 3458 return nil, errors.Trace(err) 3459 } else if !alive { 3460 return nil, applicationNotAliveErr 3461 } 3462 } 3463 ops := []txn.Op{ 3464 { 3465 C: applicationsC, 3466 Id: a.doc.DocID, 3467 Assert: isAliveDoc, 3468 Update: bson.M{"$set": bson.M{"metric-credentials": b}}, 3469 }, 3470 } 3471 return ops, nil 3472 } 3473 if err := a.st.db().Run(buildTxn); err != nil { 3474 return errors.Annotatef(err, "cannot update metric credentials") 3475 } 3476 a.doc.MetricCredentials = b 3477 return nil 3478 } 3479 3480 // StorageConstraints returns the storage constraints for the application. 3481 func (a *Application) StorageConstraints() (map[string]StorageConstraints, error) { 3482 cons, err := readStorageConstraints(a.st, a.storageConstraintsKey()) 3483 if errors.IsNotFound(err) { 3484 return nil, nil 3485 } else if err != nil { 3486 return nil, errors.Annotatef(err, "application %q", a.doc.Name) 3487 } 3488 return cons, nil 3489 } 3490 3491 // DeviceConstraints returns the device constraints for the application. 3492 func (a *Application) DeviceConstraints() (map[string]DeviceConstraints, error) { 3493 cons, err := readDeviceConstraints(a.st, a.deviceConstraintsKey()) 3494 if errors.IsNotFound(err) { 3495 return nil, nil 3496 } else if err != nil { 3497 return nil, errors.Trace(err) 3498 } 3499 return cons, nil 3500 } 3501 3502 // Status returns the status of the application. 3503 // Only unit leaders are allowed to set the status of the application. 3504 // If no status is recorded, then there are no unit leaders and the 3505 // status is derived from the unit status values. 
func (a *Application) Status() (status.StatusInfo, error) {
	info, err := getStatus(a.st.db(), a.globalKey(), "application")
	if err != nil {
		return status.StatusInfo{}, errors.Trace(err)
	}
	return info, nil
}

// CheckApplicationExpectsWorkload checks if the application expects workload or not.
func CheckApplicationExpectsWorkload(m *Model, appName string) (bool, error) {
	cm, err := m.CAASModel()
	if err != nil {
		// IAAS models always have a unit workload.
		return true, nil
	}

	// Check charm v2
	app, err := m.State().Application(appName)
	if err != nil {
		return false, errors.Trace(err)
	}
	ch, _, err := app.Charm()
	if err != nil {
		return false, errors.Trace(err)
	}

	// v2 (sidecar) charms have no separate operator workload.
	if charm.MetaFormat(ch) == charm.FormatV2 {
		return false, nil
	}

	// v1 CAAS charms expect a workload only if a pod spec has been set.
	_, err = cm.PodSpec(names.NewApplicationTag(appName))
	if err != nil && !errors.IsNotFound(err) {
		return false, errors.Trace(err)
	}
	return err == nil, nil
}

// SetStatus sets the status for the application.
func (a *Application) SetStatus(statusInfo status.StatusInfo) error {
	if !status.ValidWorkloadStatus(statusInfo.Status) {
		return errors.Errorf("cannot set invalid status %q", statusInfo.Status)
	}

	var newHistory *statusDoc
	m, err := a.st.Model()
	if err != nil {
		return errors.Trace(err)
	}
	if m.Type() == ModelTypeCAAS {
		// Application status for a caas model needs to consider status
		// info coming from the operator pod as well; It may need to
		// override what is set here.
		expectWorkload, err := CheckApplicationExpectsWorkload(m, a.Name())
		if err != nil {
			return errors.Trace(err)
		}
		operatorStatus, err := getStatus(a.st.db(), applicationGlobalOperatorKey(a.Name()), "operator")
		if err == nil {
			newHistory, err = caasHistoryRewriteDoc(statusInfo, operatorStatus, expectWorkload, status.ApplicationDisplayStatus, a.st.clock())
			if err != nil {
				return errors.Trace(err)
			}
		} else if !errors.IsNotFound(err) {
			// Missing operator status is fine; anything else is fatal.
			return errors.Trace(err)
		}
	}

	return setStatus(a.st.db(), setStatusParams{
		badge:            "application",
		globalKey:        a.globalKey(),
		status:           statusInfo.Status,
		message:          statusInfo.Message,
		rawData:          statusInfo.Data,
		updated:          timeOrNow(statusInfo.Since, a.st.clock()),
		historyOverwrite: newHistory,
	})
}

// SetOperatorStatus sets the operator status for an application.
// This is used on CAAS models.
func (a *Application) SetOperatorStatus(sInfo status.StatusInfo) error {
	m, err := a.st.Model()
	if err != nil {
		return errors.Trace(err)
	}
	if m.Type() != ModelTypeCAAS {
		return errors.NotSupportedf("caas operation on non-caas model")
	}

	err = setStatus(a.st.db(), setStatusParams{
		badge:     "operator",
		globalKey: applicationGlobalOperatorKey(a.Name()),
		status:    sInfo.Status,
		message:   sInfo.Message,
		rawData:   sInfo.Data,
		updated:   timeOrNow(sInfo.Since, a.st.clock()),
	})
	if err != nil {
		return errors.Trace(err)
	}
	// The displayed application status combines app and operator status;
	// rewrite the history entry if the combination differs.
	appStatus, err := a.Status()
	if err != nil {
		return errors.Trace(err)
	}
	expectWorkload, err := CheckApplicationExpectsWorkload(m, a.Name())
	if err != nil {
		return errors.Trace(err)
	}
	historyDoc, err := caasHistoryRewriteDoc(appStatus, sInfo, expectWorkload, status.ApplicationDisplayStatus, a.st.clock())
	if err != nil {
		return errors.Trace(err)
	}
	if historyDoc != nil {
		// rewriting application status history
		_, err = probablyUpdateStatusHistory(a.st.db(), a.globalKey(), *historyDoc)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

// StatusHistory returns a slice of at most filter.Size StatusInfo items
// or items as old as filter.Date or items newer than now - filter.Delta time
// representing past statuses for this application.
func (a *Application) StatusHistory(filter status.StatusHistoryFilter) ([]status.StatusInfo, error) {
	args := &statusHistoryArgs{
		db:        a.st.db(),
		globalKey: a.globalKey(),
		filter:    filter,
		clock:     a.st.clock(),
	}
	return statusHistory(args)
}

// UnitStatuses returns a map of unit names to their Status results (workload
// status).
func (a *Application) UnitStatuses() (map[string]status.StatusInfo, error) {
	col, closer := a.st.db().GetRawCollection(statusesC)
	defer closer()
	// Agent status is u#unit-name
	// Workload status is u#unit-name#charm
	selector := fmt.Sprintf("^%s:u#%s/\\d+(#charm)?$", a.st.ModelUUID(), a.doc.Name)
	var docs []statusDocWithID
	err := col.Find(bson.M{"_id": bson.M{"$regex": selector}}).All(&docs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	result := make(map[string]status.StatusInfo)
	workload := make(map[string]status.StatusInfo)
	agent := make(map[string]status.StatusInfo)
	for _, doc := range docs {
		key := a.st.localID(doc.ID)
		parts := strings.Split(key, "#")
		// We know there will be at least two parts because the regex
		// specifies a #.
		unitName := parts[1]
		if strings.HasSuffix(key, "#charm") {
			workload[unitName] = doc.asStatusInfo()
		} else {
			agent[unitName] = doc.asStatusInfo()
		}
	}

	// The reason for this dance is due to the way that hook errors
	// show up in status. See Unit.Status() for more details.
	for name, value := range agent {
		if value.Status == status.Error {
			result[name] = value
		} else {
			if workloadStatus, found := workload[name]; found {
				result[name] = workloadStatus
			}
			// If there is a missing workload status for the unit
			// it is possible that we are in the process of deleting the
			// unit. While dirty reads like this should be unusual, it
			// is possible. In these situations, we just don't return
			// a status for that unit.
		}
	}
	return result, nil
}

// addApplicationOpsArgs carries everything addApplicationOps needs to build
// the full set of insert ops for a new application.
type addApplicationOpsArgs struct {
	applicationDoc    *applicationDoc
	statusDoc         statusDoc
	constraints       constraints.Value
	storage           map[string]StorageConstraints
	devices           map[string]DeviceConstraints
	applicationConfig map[string]interface{}
	charmConfig       map[string]interface{}
	// These are nil when adding a new application, and most likely
	// non-nil when migrating.
	leadershipSettings map[string]interface{}
	operatorStatus     *statusDoc
}

// addApplicationOps returns the operations required to add an application to the
// applications collection, along with all the associated expected other application
// entries. This method is used by both the *State.AddApplication method and the
// migration import code.
func addApplicationOps(mb modelBackend, app *Application, args addApplicationOpsArgs) ([]txn.Op, error) {
	charmRefOps, err := appCharmIncRefOps(mb, args.applicationDoc.Name, args.applicationDoc.CharmURL, true)
	if err != nil {
		return nil, errors.Trace(err)
	}

	globalKey := app.globalKey()
	charmConfigKey := app.charmConfigKey()
	applicationConfigKey := app.applicationConfigKey()
	storageConstraintsKey := app.storageConstraintsKey()
	deviceConstraintsKey := app.deviceConstraintsKey()
	leadershipKey := leadershipSettingsKey(app.Name())

	ops := []txn.Op{
		createConstraintsOp(globalKey, args.constraints),
		createStorageConstraintsOp(storageConstraintsKey, args.storage),
		createDeviceConstraintsOp(deviceConstraintsKey, args.devices),
		createSettingsOp(settingsC, charmConfigKey, args.charmConfig),
		createSettingsOp(settingsC, applicationConfigKey, args.applicationConfig),
		createSettingsOp(settingsC, leadershipKey, args.leadershipSettings),
		createStatusOp(mb, globalKey, args.statusDoc),
		addModelApplicationRefOp(mb, app.Name()),
	}
	m, err := app.st.Model()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if m.Type() == ModelTypeCAAS {
		// Operator status defaults to the application status unless a
		// specific one was supplied (e.g. by migration import).
		operatorStatusDoc := args.statusDoc
		if args.operatorStatus != nil {
			operatorStatusDoc = *args.operatorStatus
		}
		ops = append(ops, createStatusOp(mb, applicationGlobalOperatorKey(app.Name()), operatorStatusDoc))
	}

	ops = append(ops, charmRefOps...)
	ops = append(ops, txn.Op{
		C:      applicationsC,
		Id:     app.Name(),
		Assert: txn.DocMissing,
		Insert: args.applicationDoc,
	})
	// Assert-only op (no Insert): fail if a remote application already
	// exists with the same name.
	ops = append(ops, txn.Op{
		C:      remoteApplicationsC,
		Id:     app.Name(),
		Assert: txn.DocMissing,
	})
	return ops, nil
}

// SetPassword sets the password for the application's agent.
3758 // TODO(caas) - consider a separate CAAS application entity 3759 func (a *Application) SetPassword(password string) error { 3760 if len(password) < utils.MinAgentPasswordLength { 3761 return fmt.Errorf("password is only %d bytes long, and is not a valid Agent password", len(password)) 3762 } 3763 passwordHash := utils.AgentPasswordHash(password) 3764 ops := []txn.Op{{ 3765 C: applicationsC, 3766 Id: a.doc.DocID, 3767 Assert: notDeadDoc, 3768 Update: bson.D{{"$set", bson.D{{"passwordhash", passwordHash}}}}, 3769 }} 3770 err := a.st.db().RunTransaction(ops) 3771 if err != nil { 3772 return fmt.Errorf("cannot set password of application %q: %v", a, onAbort(err, stateerrors.ErrDead)) 3773 } 3774 a.doc.PasswordHash = passwordHash 3775 return nil 3776 } 3777 3778 // PasswordValid returns whether the given password is valid 3779 // for the given application. 3780 func (a *Application) PasswordValid(password string) bool { 3781 agentHash := utils.AgentPasswordHash(password) 3782 return agentHash == a.doc.PasswordHash 3783 } 3784 3785 // UnitUpdateProperties holds information used to update 3786 // the state model for the unit. 3787 type UnitUpdateProperties struct { 3788 ProviderId *string 3789 Address *string 3790 Ports *[]string 3791 UnitName *string 3792 AgentStatus *status.StatusInfo 3793 UnitStatus *status.StatusInfo 3794 CloudContainerStatus *status.StatusInfo 3795 } 3796 3797 // UpdateUnits applies the given application unit update operations. 3798 func (a *Application) UpdateUnits(unitsOp *UpdateUnitsOperation) error { 3799 return a.st.ApplyOperation(unitsOp) 3800 } 3801 3802 // UpdateUnitsOperation is a model operation for updating 3803 // some units of an application. 
3804 type UpdateUnitsOperation struct { 3805 Adds []*AddUnitOperation 3806 Deletes []*DestroyUnitOperation 3807 Updates []*UpdateUnitOperation 3808 } 3809 3810 func (op *UpdateUnitsOperation) allOps() []ModelOperation { 3811 var all []ModelOperation 3812 for _, mop := range op.Adds { 3813 all = append(all, mop) 3814 } 3815 for _, mop := range op.Updates { 3816 all = append(all, mop) 3817 } 3818 for _, mop := range op.Deletes { 3819 all = append(all, mop) 3820 } 3821 return all 3822 } 3823 3824 // Build is part of the ModelOperation interface. 3825 func (op *UpdateUnitsOperation) Build(attempt int) ([]txn.Op, error) { 3826 var ops []txn.Op 3827 3828 all := op.allOps() 3829 for _, txnOp := range all { 3830 switch nextOps, err := txnOp.Build(attempt); err { 3831 case jujutxn.ErrNoOperations: 3832 continue 3833 case nil: 3834 ops = append(ops, nextOps...) 3835 default: 3836 return nil, errors.Trace(err) 3837 } 3838 } 3839 return ops, nil 3840 } 3841 3842 // Done is part of the ModelOperation interface. 3843 func (op *UpdateUnitsOperation) Done(err error) error { 3844 if err != nil { 3845 return errors.Annotate(err, "updating units") 3846 } 3847 all := op.allOps() 3848 for _, op := range all { 3849 if err := op.Done(nil); err != nil { 3850 return errors.Trace(err) 3851 } 3852 } 3853 return nil 3854 } 3855 3856 // AddOperation returns a model operation that will add a unit. 3857 func (a *Application) AddOperation(props UnitUpdateProperties) *AddUnitOperation { 3858 return &AddUnitOperation{ 3859 application: &Application{st: a.st, doc: a.doc}, 3860 props: props, 3861 } 3862 } 3863 3864 // AddUnitOperation is a model operation that will add a unit. 3865 type AddUnitOperation struct { 3866 application *Application 3867 props UnitUpdateProperties 3868 3869 unitName string 3870 } 3871 3872 // Build is part of the ModelOperation interface. 
func (op *AddUnitOperation) Build(attempt int) ([]txn.Op, error) {
	// Adding a unit is only valid while the application is alive.
	if alive, err := isAlive(op.application.st, applicationsC, op.application.doc.DocID); err != nil {
		return nil, err
	} else if !alive {
		return nil, applicationNotAliveErr
	}

	var ops []txn.Op

	addUnitArgs := AddUnitParams{
		ProviderId: op.props.ProviderId,
		Address:    op.props.Address,
		Ports:      op.props.Ports,
		UnitName:   op.props.UnitName,
	}
	name, addOps, err := op.application.addUnitOps("", addUnitArgs, nil)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Remember the allocated unit name so Done can apply the
	// follow-up status updates for this unit.
	op.unitName = name
	ops = append(ops, addOps...)

	if op.props.CloudContainerStatus != nil {
		now := op.application.st.clock().Now()
		doc := statusDoc{
			Status:     op.props.CloudContainerStatus.Status,
			StatusInfo: op.props.CloudContainerStatus.Message,
			StatusData: mgoutils.EscapeKeys(op.props.CloudContainerStatus.Data),
			Updated:    now.UnixNano(),
		}

		newStatusOps := createStatusOp(op.application.st, globalCloudContainerKey(name), doc)
		ops = append(ops, newStatusOps)
	}

	return ops, nil
}

// Done is part of the ModelOperation interface.
func (op *AddUnitOperation) Done(err error) error {
	if err != nil {
		return errors.Annotatef(err, "adding unit to %q", op.application.Name())
	}
	if op.props.AgentStatus == nil && op.props.CloudContainerStatus == nil {
		return nil
	}
	// We do a separate status update here because we require all units to be
	// created as "allocating". If the add operation specifies a status,
	// that status is used to update the initial "allocating" status. We then
	// get the expected 2 status entries in history. This is done in a separate
	// transaction; a failure here will effectively be retried because the worker
	// which has made the API call will restart and then perform the necessary update.
	u, err := op.application.st.Unit(op.unitName)
	if err != nil {
		return errors.Trace(err)
	}
	if op.props.AgentStatus != nil {
		now := op.application.st.clock().Now()
		if err := u.Agent().SetStatus(status.StatusInfo{
			Status:  op.props.AgentStatus.Status,
			Message: op.props.AgentStatus.Message,
			Data:    op.props.AgentStatus.Data,
			Since:   &now,
		}); err != nil {
			return errors.Trace(err)
		}
	}
	if op.props.CloudContainerStatus != nil {
		doc := statusDoc{
			Status:     op.props.CloudContainerStatus.Status,
			StatusInfo: op.props.CloudContainerStatus.Message,
			StatusData: mgoutils.EscapeKeys(op.props.CloudContainerStatus.Data),
			Updated:    timeOrNow(op.props.CloudContainerStatus.Since, u.st.clock()).UnixNano(),
		}
		// Record the container status in history; best-effort ("probably")
		// since history writes are not part of the main transaction.
		_, err := probablyUpdateStatusHistory(op.application.st.db(), globalCloudContainerKey(op.unitName), doc)
		if err != nil {
			return errors.Trace(err)
		}

		// Ensure unit history is updated correctly
		unitStatus, err := getStatus(op.application.st.db(), unitGlobalKey(op.unitName), "unit")
		if err != nil {
			return errors.Trace(err)
		}
		newHistory, err := caasHistoryRewriteDoc(unitStatus, *op.props.CloudContainerStatus, true, status.UnitDisplayStatus, op.application.st.clock())
		if err != nil {
			return errors.Trace(err)
		}
		if newHistory != nil {
			// Overwrite the history entry so the displayed unit status
			// reflects the cloud container status.
			err = setStatus(op.application.st.db(), setStatusParams{
				badge:            "unit",
				globalKey:        unitGlobalKey(op.unitName),
				status:           unitStatus.Status,
				message:          unitStatus.Message,
				rawData:          unitStatus.Data,
				updated:          timeOrNow(unitStatus.Since, u.st.clock()),
				historyOverwrite: newHistory,
			})
			if err != nil {
				return errors.Trace(err)
			}
		}
	}

	return nil
}

// UpdateCloudService updates the cloud service details for the application.
3982 func (a *Application) UpdateCloudService(providerId string, addresses []network.SpaceAddress) error { 3983 _, err := a.st.SaveCloudService(SaveCloudServiceArgs{ 3984 Id: a.Name(), 3985 ProviderId: providerId, 3986 Addresses: addresses, 3987 }) 3988 return errors.Trace(err) 3989 } 3990 3991 // ServiceInfo returns information about this application's cloud service. 3992 // This is only used for CAAS models. 3993 func (a *Application) ServiceInfo() (CloudServicer, error) { 3994 svc, err := a.st.CloudService(a.Name()) 3995 if err != nil { 3996 return nil, errors.Trace(err) 3997 } 3998 return svc, nil 3999 } 4000 4001 // UnitCount returns the of number of units for this application. 4002 func (a *Application) UnitCount() int { 4003 return a.doc.UnitCount 4004 } 4005 4006 // RelationCount returns the of number of active relations for this application. 4007 func (a *Application) RelationCount() int { 4008 return a.doc.RelationCount 4009 4010 } 4011 4012 // UnitNames returns the of this application's units. 4013 func (a *Application) UnitNames() ([]string, error) { 4014 u, err := appUnitNames(a.st, a.Name()) 4015 return u, errors.Trace(err) 4016 } 4017 4018 // CharmPendingToBeDownloaded returns true if the charm referenced by this 4019 // application is pending to be downloaded. 4020 func (a *Application) CharmPendingToBeDownloaded() bool { 4021 ch, _, err := a.Charm() 4022 if err != nil { 4023 return false 4024 } 4025 origin := a.CharmOrigin() 4026 if origin == nil { 4027 return false 4028 } 4029 // The charm may be downloaded, but the application's 4030 // data may not updated yet. This can happen when multiple 4031 // applications share a charm. 
4032 notReady := origin.Source == "charm-hub" && origin.ID == "" 4033 return !ch.IsPlaceholder() && !ch.IsUploaded() || notReady 4034 } 4035 4036 func appUnitNames(st *State, appName string) ([]string, error) { 4037 unitsCollection, closer := st.db().GetCollection(unitsC) 4038 defer closer() 4039 4040 var docs []struct { 4041 Name string `bson:"name"` 4042 } 4043 err := unitsCollection.Find(bson.D{{"application", appName}}).Select(bson.D{{"name", 1}}).All(&docs) 4044 if err != nil { 4045 return nil, errors.Trace(err) 4046 } 4047 4048 unitNames := make([]string, len(docs)) 4049 for i, doc := range docs { 4050 unitNames[i] = doc.Name 4051 } 4052 return unitNames, nil 4053 } 4054 4055 // WatchApplicationsWithPendingCharms returns a watcher that emits the IDs of 4056 // applications that have a charm origin populated and reference a charm that 4057 // is pending to be downloaded or the charm origin ID has not been filled in yet 4058 // for charm-hub charms. 4059 func (st *State) WatchApplicationsWithPendingCharms() StringsWatcher { 4060 return newCollectionWatcher(st, colWCfg{ 4061 col: applicationsC, 4062 filter: func(key interface{}) bool { 4063 sKey, ok := key.(string) 4064 if !ok { 4065 return false 4066 } 4067 4068 // We need an application with both a charm URL and 4069 // an origin set. 4070 app, _ := st.Application(st.localID(sKey)) 4071 if app == nil { 4072 return false 4073 } 4074 return app.CharmPendingToBeDownloaded() 4075 }, 4076 // We want to be notified for application documents as soon as 4077 // they appear in the collection. As the revno for inserted 4078 // docs is 0 we need to set the threshold to -1 so inserted 4079 // docs are not ignored by the watcher. 4080 revnoThreshold: -1, 4081 }) 4082 }