github.com/juju/juju@v0.0.0-20240327075706-a90865de2538/worker/uniter/util_test.go

// Copyright 2012-2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package uniter_test

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	pebbleclient "github.com/canonical/pebble/client"
	jujucharm "github.com/juju/charm/v12"
	"github.com/juju/clock"
	"github.com/juju/clock/testclock"
	"github.com/juju/collections/set"
	"github.com/juju/errors"
	"github.com/juju/loggo"
	"github.com/juju/mutex/v2"
	"github.com/juju/names/v5"
	gt "github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	ft "github.com/juju/testing/filetesting"
	"github.com/juju/utils/v3"
	"github.com/juju/worker/v3"
	gc "gopkg.in/check.v1"

	"github.com/juju/juju/api"
	"github.com/juju/juju/api/agent/secretsmanager"
	apiuniter "github.com/juju/juju/api/agent/uniter"
	"github.com/juju/juju/api/client/charms"
	corearch "github.com/juju/juju/core/arch"
	corecharm "github.com/juju/juju/core/charm"
	"github.com/juju/juju/core/instance"
	"github.com/juju/juju/core/leadership"
	corelease "github.com/juju/juju/core/lease"
	"github.com/juju/juju/core/machinelock"
	"github.com/juju/juju/core/model"
	"github.com/juju/juju/core/network"
	resourcetesting "github.com/juju/juju/core/resources/testing"
	"github.com/juju/juju/core/secrets"
	"github.com/juju/juju/core/status"
	"github.com/juju/juju/core/watcher/watchertest"
	"github.com/juju/juju/juju/testing"
	jujusecrets "github.com/juju/juju/secrets"
	_ "github.com/juju/juju/secrets/provider/all"
	"github.com/juju/juju/state"
	"github.com/juju/juju/state/storage"
	"github.com/juju/juju/testcharms"
	coretesting "github.com/juju/juju/testing"
	jworker "github.com/juju/juju/worker"
	"github.com/juju/juju/worker/fortress"
	"github.com/juju/juju/worker/uniter"
	"github.com/juju/juju/worker/uniter/charm"
	"github.com/juju/juju/worker/uniter/operation"
	"github.com/juju/juju/worker/uniter/remotestate"
	"github.com/juju/juju/worker/uniter/runner"
	runnercontext "github.com/juju/juju/worker/uniter/runner/context"
)

var (
	// (achilleasa) 2019-10-11:
	// These addresses must always be IPs. If not, the facade code
	// (NetworksForRelation in particular) will attempt to resolve them and
	// cause the uniter tests to fail with an "unknown host" error.
	dummyPrivateAddress = network.NewSpaceAddress("172.0.30.1", network.WithScope(network.ScopeCloudLocal))
	dummyPublicAddress  = network.NewSpaceAddress("1.1.1.1", network.WithScope(network.ScopePublic))
)

// worstCase is used for timeouts when timing out
// will fail the test. Raising this value should
// not affect the overall running time of the tests
// unless they fail.
const worstCase = 100 * coretesting.LongWait

// Assign the unit to a provisioned machine with dummy addresses set.
func assertAssignUnit(c *gc.C, st *state.State, u *state.Unit) {
	err := u.AssignToNewMachine()
	c.Assert(err, jc.ErrorIsNil)
	mid, err := u.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)
	machine, err := st.Machine(mid)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProvisioned("i-exist", "", "fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProviderAddresses(dummyPrivateAddress, dummyPublicAddress)
	c.Assert(err, jc.ErrorIsNil)
}

// Assign the unit to a provisioned LXD container with dummy addresses set.
func assertAssignUnitLXDContainer(c *gc.C, st *state.State, u *state.Unit) {
	machine, err := st.AddMachineInsideNewMachine(
		state.MachineTemplate{
			Base: state.UbuntuBase("12.10"),
			Jobs: []state.MachineJob{state.JobHostUnits},
		},
		state.MachineTemplate{ // parent
			Base: state.UbuntuBase("12.10"),
			Jobs: []state.MachineJob{state.JobHostUnits},
		},
		instance.LXD,
	)
	c.Assert(err, jc.ErrorIsNil)
	err = u.AssignToMachine(machine)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProvisioned("i-exist", "", "fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetProviderAddresses(dummyPrivateAddress, dummyPublicAddress)
	c.Assert(err, jc.ErrorIsNil)
}

type testContext struct {
	uuid                   string
	path                   string
	dataDir                string
	s                      *UniterSuite
	st                     *state.State
	api                    *apiuniter.State
	resources              *apiuniter.ResourcesFacadeClient
	payloads               *apiuniter.PayloadFacadeClient
	apiConn                api.Connection
	leaseManager           corelease.Manager
	leaderTracker          *mockLeaderTracker
	charmDirGuard          *mockCharmDirGuard
	charms                 map[string][]byte
	hooks                  []string
	sch                    *state.Charm
	application            *state.Application
	unit                   *state.Unit
	uniter                 *uniter.Uniter
	relatedApplication     *state.Application
	relation               *state.Relation
	relationUnits          map[string]*state.RelationUnit
	subordinate            *state.Unit
	createdSecretURI       *secrets.URI
	updateStatusHookTicker *manualTicker
	containerNames         []string
	pebbleClients          map[string]*fakePebbleClient
	secretsRotateCh        chan []string
	secretsExpireCh        chan []string
	secretsClient          *secretsmanager.Client
	secretBackends         jujusecrets.BackendsClient
	err                    string

	mu             sync.Mutex
	hooksCompleted []string
	runner         *mockRunner
	deployer       *mockDeployer
}

var _ uniter.UniterExecutionObserver = (*testContext)(nil)

// HookCompleted implements the UniterExecutionObserver interface.
func (ctx *testContext) HookCompleted(hookName string) {
	ctx.mu.Lock()
	ctx.hooksCompleted = append(ctx.hooksCompleted, hookName)
	ctx.mu.Unlock()
}

// HookFailed implements the UniterExecutionObserver interface.
func (ctx *testContext) HookFailed(hookName string) {
	ctx.mu.Lock()
	ctx.hooksCompleted = append(ctx.hooksCompleted, "fail-"+hookName)
	ctx.mu.Unlock()
}

func (ctx *testContext) setExpectedError(err string) {
	ctx.mu.Lock()
	ctx.err = err
	ctx.mu.Unlock()
}

func (ctx *testContext) run(c *gc.C, steps []stepper) {
	defer func() {
		if ctx.uniter != nil {
			err := worker.Stop(ctx.uniter)
			if ctx.err == "" {
				if errors.Cause(err) == mutex.ErrCancelled {
					// This can happen if the uniter lock acquire was
					// temporarily blocked by test code holding the
					// lock (like in waitHooks). The acquire call is
					// delaying but then gets cancelled, and that
					// error bubbles up to here.
					// lp:1635664
					c.Logf("ignoring lock acquire cancelled by stop")
					return
				}
				c.Assert(err, jc.ErrorIsNil)
			} else {
				c.Assert(err, gc.ErrorMatches, ctx.err)
			}
		}
	}()
	for i, s := range steps {
		c.Logf("step %d:\n", i)
		step(c, ctx, s)
	}
}

func (ctx *testContext) apiLogin(c *gc.C) {
	password, err := utils.RandomPassword()
	c.Assert(err, jc.ErrorIsNil)
	err = ctx.unit.SetPassword(password)
	c.Assert(err, jc.ErrorIsNil)
	apiConn := ctx.s.OpenAPIAs(c, ctx.unit.Tag(), password)
	c.Assert(apiConn, gc.NotNil)
	c.Logf("API: login as %q successful", ctx.unit.Tag())
	testApi, err := apiuniter.NewFromConnection(apiConn)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(testApi, gc.NotNil)
	ctx.api = testApi
	ctx.payloads = apiuniter.NewPayloadFacadeClient(apiConn)
	resourcesApi, err := apiuniter.NewResourcesFacadeClient(apiConn, ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	ctx.resources = resourcesApi
	ctx.apiConn = apiConn
	ctx.leaderTracker = newMockLeaderTracker(ctx)
	ctx.leaderTracker.setLeader(c, true)
	ctx.secretsClient = secretsmanager.NewClient(apiConn)
	ctx.secretBackends, err = jujusecrets.NewClient(ctx.secretsClient)
	c.Assert(err, jc.ErrorIsNil)
}

func (ctx *testContext) matchHooks(c *gc.C) (match, cannotMatch, overshoot bool) {
	ctx.mu.Lock()
	defer ctx.mu.Unlock()
	c.Logf("actual hooks: %#v", ctx.hooksCompleted)
	c.Logf("expected hooks: %#v", ctx.hooks)

	// If hooks are automatically retried, this may cause stutter in the actual observed
	// hooks depending on timing of the test steps. For the purposes of evaluating expected
	// hooks, the loop below skips over any retried, failed hooks
	// (up to the allowed retry limit for tests which is at most 2 in practice).

	const allowedHookRetryCount = 2

	previousFailedHook := ""
	retryCount := 0
	totalDuplicateFails := 0
	numCompletedHooks := len(ctx.hooksCompleted)
	numExpectedHooks := len(ctx.hooks)

	for hooksIndex := 0; hooksIndex < numExpectedHooks; {
		hooksCompletedIndex := hooksIndex + totalDuplicateFails
		if hooksCompletedIndex >= len(ctx.hooksCompleted) {
			// not all hooks have fired yet
			return false, false, false
		}
		completedHook := ctx.hooksCompleted[hooksCompletedIndex]
		if completedHook != ctx.hooks[hooksIndex] {
			if completedHook == previousFailedHook && retryCount < allowedHookRetryCount {
				retryCount++
				totalDuplicateFails++
				continue
			}
			cannotMatch = true
			return false, cannotMatch, false
		}
		hooksIndex++
		if strings.HasPrefix(completedHook, "fail-") {
			previousFailedHook = completedHook
		} else {
			retryCount = 0
			previousFailedHook = ""
		}
	}

	// Ensure any duplicate hook failures at the end of the sequence are counted.
	for i := 0; i < numCompletedHooks-numExpectedHooks; i++ {
		if ctx.hooksCompleted[numExpectedHooks+i] != previousFailedHook {
			break
		}
		totalDuplicateFails++
	}
	return true, false, numCompletedHooks > numExpectedHooks+totalDuplicateFails
}

type uniterTest struct {
	summary string
	steps   []stepper
}

func ut(summary string, steps ...stepper) uniterTest {
	return uniterTest{summary, steps}
}

type stepper interface {
	step(c *gc.C, ctx *testContext)
}

func step(c *gc.C, ctx *testContext, s stepper) {
	c.Logf("%#v", s)
	s.step(c, ctx)
}

type ensureStateWorker struct{}

func (s ensureStateWorker) step(c *gc.C, ctx *testContext) {
	addresses, err := ctx.st.Addresses()
	if err != nil || len(addresses) == 0 {
		addControllerMachine(c, ctx.st)
	}
}

func addControllerMachine(c *gc.C, st *state.State) {
	// The AddControllerMachine call will update the API host ports
	// to made-up addresses. We need valid addresses so that the uniter
	// can download charms from the API server.
	apiHostPorts, err := st.APIHostPortsForClients()
	c.Assert(err, gc.IsNil)
	testing.AddControllerMachine(c, st)
	err = st.SetAPIHostPorts(apiHostPorts)
	c.Assert(err, gc.IsNil)
}

type createCharm struct {
	revision  int
	badHooks  []string
	customize func(*gc.C, *testContext, string)
}

func startupHooks(minion bool) []string {
	leaderHook := "leader-elected"
	if minion {
		leaderHook = "leader-settings-changed"
	}
	return []string{"install", leaderHook, "config-changed", "start"}
}

func (s createCharm) step(c *gc.C, ctx *testContext) {
	base := testcharms.Repo.ClonedDirPath(c.MkDir(), "wordpress")
	if s.customize != nil {
		s.customize(c, ctx, base)
	}
	if len(s.badHooks) > 0 {
		ctx.runner.hooksWithErrors = set.NewStrings(s.badHooks...)
	}
	dir, err := jujucharm.ReadCharmDir(base)
	c.Assert(err, jc.ErrorIsNil)
	err = dir.SetDiskRevision(s.revision)
	c.Assert(err, jc.ErrorIsNil)
	step(c, ctx, addCharm{dir, curl(s.revision)})
}

type addCharm struct {
	dir  *jujucharm.CharmDir
	curl string
}

func (s addCharm) step(c *gc.C, ctx *testContext) {
	var buf bytes.Buffer
	err := s.dir.ArchiveTo(&buf)
	c.Assert(err, jc.ErrorIsNil)
	body := buf.Bytes()
	hash, _, err := utils.ReadSHA256(&buf)
	c.Assert(err, jc.ErrorIsNil)

	storagePath := fmt.Sprintf("/charms/%s/%d", s.dir.Meta().Name, s.dir.Revision())
	ctx.charms[storagePath] = body
	info := state.CharmInfo{
		Charm:       s.dir,
		ID:          s.curl,
		StoragePath: storagePath,
		SHA256:      hash,
	}

	ctx.sch, err = ctx.st.AddCharm(info)
	c.Assert(err, jc.ErrorIsNil)
}

type serveCharm struct{}

func (s serveCharm) step(c *gc.C, ctx *testContext) {
	testStorage := storage.NewStorage(ctx.st.ModelUUID(), ctx.st.MongoSession())
	for storagePath, data := range ctx.charms {
		err := testStorage.Put(storagePath, bytes.NewReader(data), int64(len(data)))
		c.Assert(err, jc.ErrorIsNil)
		delete(ctx.charms, storagePath)
	}
}

type addCharmProfileToMachine struct {
	profiles []string
}

func (acpm addCharmProfileToMachine) step(c *gc.C, ctx *testContext) {
	machineId, err := ctx.unit.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)
	machine, err := ctx.st.Machine(machineId)
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetCharmProfiles(acpm.profiles)
	c.Assert(err, jc.ErrorIsNil)
}

type createApplicationAndUnit struct {
	applicationName string
	storage         map[string]state.StorageConstraints
	container       bool
}

func (csau createApplicationAndUnit) step(c *gc.C, ctx *testContext) {
	if csau.applicationName == "" {
		csau.applicationName = "u"
	}
	sch, err := ctx.st.Charm(curl(0))
	c.Assert(err, jc.ErrorIsNil)
	app := ctx.s.AddTestingApplicationWithStorage(c, csau.applicationName, sch, csau.storage)
	unit, err := app.AddUnit(state.AddUnitParams{})
	c.Assert(err, jc.ErrorIsNil)
	err = unit.SetCharmURL(curl(0))
	c.Assert(err, jc.ErrorIsNil)

	// Assign the unit to a provisioned machine to match expected state.
	if csau.container {
		assertAssignUnitLXDContainer(c, ctx.st, unit)
	} else {
		assertAssignUnit(c, ctx.st, unit)
	}

	ctx.application = app
	ctx.unit = unit

	ctx.apiLogin(c)
}

type deleteUnit struct{}

func (d deleteUnit) step(c *gc.C, ctx *testContext) {
	ctx.unit.DestroyWithForce(true, time.Duration(0))
}

type createUniter struct {
	minion               bool
	executorFunc         uniter.NewOperationExecutorFunc
	translateResolverErr func(error) error
}

func (s createUniter) step(c *gc.C, ctx *testContext) {
	step(c, ctx, ensureStateWorker{})
	step(c, ctx, createApplicationAndUnit{})
	if s.minion {
		step(c, ctx, forceMinion{})
	}
	step(c, ctx, startUniter{
		newExecutorFunc:      s.executorFunc,
		translateResolverErr: s.translateResolverErr,
		unitTag:              ctx.unit.Tag().String(),
	})
	step(c, ctx, waitAddresses{})
}

type waitAddresses struct{}

func (waitAddresses) step(c *gc.C, ctx *testContext) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for unit addresses")
		case <-time.After(coretesting.ShortWait):
			err := ctx.unit.Refresh()
			if err != nil {
				c.Fatalf("unit refresh failed: %v", err)
			}
			// GZ 2013-07-10: Hardcoded values from dummy environ
			// special cased here, questionable.
			private, _ := ctx.unit.PrivateAddress()
			if private.Value != dummyPrivateAddress.Value {
				continue
			}
			public, _ := ctx.unit.PublicAddress()
			if public.Value != dummyPublicAddress.Value {
				continue
			}
			return
		}
	}
}

type startUniter struct {
	unitTag              string
	newExecutorFunc      uniter.NewOperationExecutorFunc
	translateResolverErr func(error) error
	rebootQuerier        uniter.RebootQuerier
}

type fakeRebootQuerier struct {
	rebootDetected bool
}

func (q fakeRebootQuerier) Query(names.Tag) (bool, error) {
	return q.rebootDetected, nil
}

type fakeRebootQuerierTrueOnce struct {
	times  int
	result map[int]bool
}

func (q *fakeRebootQuerierTrueOnce) Query(_ names.Tag) (bool, error) {
	retVal := q.result[q.times]
	q.times += 1
	return retVal, nil
}

// mimicRealRebootQuerier returns a reboot querier which mimics
// the behavior of the uniter without a reboot.
func mimicRealRebootQuerier() uniter.RebootQuerier {
	return &fakeRebootQuerierTrueOnce{result: map[int]bool{0: rebootDetected, 1: rebootNotDetected, 2: rebootNotDetected}}
}

func (s startUniter) step(c *gc.C, ctx *testContext) {
	if s.unitTag == "" {
		s.unitTag = "unit-u-0"
	}
	if ctx.uniter != nil {
		panic("don't start two uniters!")
	}
	if ctx.api == nil {
		panic("API connection not established")
	}
	if ctx.resources == nil {
		panic("resources API connection not established")
	}
	if ctx.payloads == nil {
		panic("payloads API connection not established")
	}

	if ctx.runner == nil {
		panic("process runner not set up")
	}
	if ctx.deployer == nil {
		panic("deployer not set up")
	}
	if s.rebootQuerier == nil {
		s.rebootQuerier = mimicRealRebootQuerier()
	}
	tag, err := names.ParseUnitTag(s.unitTag)
	if err != nil {
		panic(err.Error())
	}
	downloader := charms.NewCharmDownloader(ctx.apiConn)
	operationExecutor := operation.NewExecutor
	if s.newExecutorFunc != nil {
		operationExecutor = s.newExecutorFunc
	}

	uniterParams := uniter.UniterParams{
		UniterFacade: ctx.api,
		UnitTag:      tag,
		ModelType:    model.IAAS,
		LeadershipTrackerFunc: func(_ names.UnitTag) leadership.TrackerWorker {
			return ctx.leaderTracker
		},
		PayloadFacade:        ctx.payloads,
		ResourcesFacade:      ctx.resources,
		CharmDirGuard:        ctx.charmDirGuard,
		DataDir:              ctx.dataDir,
		Downloader:           downloader,
		MachineLock:          processLock,
		UpdateStatusSignal:   ctx.updateStatusHookTicker.ReturnTimer(),
		NewOperationExecutor: operationExecutor,
		NewProcessRunner: func(context runnercontext.Context, paths runnercontext.Paths, remoteExecutor runner.ExecFunc) runner.Runner {
			ctx.runner.ctx = context
			return ctx.runner
		},
		NewDeployer: func(charmPath, dataPath string, bundles charm.BundleReader, logger charm.Logger) (charm.Deployer, error) {
			ctx.deployer.charmPath = charmPath
			ctx.deployer.dataPath = dataPath
			ctx.deployer.bundles = bundles
			return ctx.deployer, nil
		},
		TranslateResolverErr: s.translateResolverErr,
		Observer:             ctx,
		// TODO(axw) 2015-11-02 #1512191
		// update tests that rely on timing to advance clock
		// appropriately.
		Clock:           clock.WallClock,
		RebootQuerier:   s.rebootQuerier,
		Logger:          loggo.GetLogger("test"),
		ContainerNames:  ctx.containerNames,
		NewPebbleClient: func(cfg *pebbleclient.Config) (uniter.PebbleClient, error) {
			res := pebbleSocketPathRegexp.FindAllStringSubmatch(cfg.Socket, 1)
			if res == nil {
				return nil, errors.NotFoundf("container")
			}
			client, ok := ctx.pebbleClients[res[0][1]]
			if !ok {
				return nil, errors.NotFoundf("container")
			}
			return client, nil
		},
		SecretRotateWatcherFunc: func(u names.UnitTag, isLeader bool, secretsChanged chan []string) (worker.Worker, error) {
			c.Assert(u.String(), gc.Equals, s.unitTag)
			ctx.secretsRotateCh = secretsChanged
			return watchertest.NewMockStringsWatcher(ctx.secretsRotateCh), nil
		},
		SecretExpiryWatcherFunc: func(u names.UnitTag, isLeader bool, secretsChanged chan []string) (worker.Worker, error) {
			c.Assert(u.String(), gc.Equals, s.unitTag)
			ctx.secretsExpireCh = secretsChanged
			return watchertest.NewMockStringsWatcher(ctx.secretsExpireCh), nil
		},
		SecretsClient: ctx.secretsClient,
		SecretsBackendGetter: func() (jujusecrets.BackendsClient, error) {
			return ctx.secretBackends, nil
		},
	}
	ctx.uniter, err = uniter.NewUniter(&uniterParams)
	c.Assert(err, jc.ErrorIsNil)
}

type waitUniterDead struct {
	err string
}

func (s waitUniterDead) step(c *gc.C, ctx *testContext) {
	if s.err != "" {
		err := s.waitDead(c, ctx)
		c.Log(errors.ErrorStack(err))
		c.Assert(err, gc.ErrorMatches, s.err)
		return
	}

	// In the default case, we're waiting for worker.ErrTerminateAgent, but
	// the path to that error can be tricky. If the unit becomes Dead at an
	// inconvenient time, unrelated calls can fail -- as they should -- but
	// not be detected as worker.ErrTerminateAgent. In this case, we restart
	// the uniter and check that it fails as expected when starting up; this
	// mimics the behaviour of the unit agent and verifies that the UA will,
	// eventually, see the correct error and respond appropriately.
	err := s.waitDead(c, ctx)
	if err != jworker.ErrTerminateAgent {
		step(c, ctx, startUniter{})
		err = s.waitDead(c, ctx)
	}
	c.Assert(err, gc.Equals, jworker.ErrTerminateAgent)
	err = ctx.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctx.unit.Life(), gc.Equals, state.Dead)
}

func (s waitUniterDead) waitDead(c *gc.C, ctx *testContext) error {
	u := ctx.uniter
	ctx.uniter = nil

	wait := make(chan error, 1)
	go func() {
		wait <- u.Wait()
	}()

	select {
	case err := <-wait:
		return err
	case <-time.After(worstCase):
		u.Kill()
		c.Fatalf("uniter still alive")
	}
	panic("unreachable")
}

type stopUniter struct {
	err string
}

func (s stopUniter) step(c *gc.C, ctx *testContext) {
	u := ctx.uniter
	if u == nil {
		c.Logf("uniter not started, skipping stopUniter{}")
		return
	}
	ctx.uniter = nil
	err := worker.Stop(u)
	if s.err == "" {
		c.Assert(err, jc.ErrorIsNil)
	} else {
		c.Assert(err, gc.ErrorMatches, s.err)
	}
}

type verifyWaiting struct{}

func (s verifyWaiting) step(c *gc.C, ctx *testContext) {
	step(c, ctx, stopUniter{})
	step(c, ctx, startUniter{rebootQuerier: fakeRebootQuerier{rebootNotDetected}})
	step(c, ctx, waitHooks{})
}

type verifyRunning struct {
	minion bool
}

func (s verifyRunning) step(c *gc.C, ctx *testContext) {
	step(c, ctx, stopUniter{})
	step(c, ctx, startUniter{rebootQuerier: fakeRebootQuerier{rebootNotDetected}})
	var hooks []string
	if s.minion {
		hooks = append(hooks, "leader-settings-changed")
	}
	// We don't expect config-changed to always run on agent restart
	// anymore.
	step(c, ctx, waitHooks(hooks))
}

type startupError struct {
	badHook string
}

func (s startupError) step(c *gc.C, ctx *testContext) {
	step(c, ctx, createCharm{badHooks: []string{s.badHook}})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{
		statusGetter: unitStatusGetter,
		status:       status.Error,
		info:         fmt.Sprintf(`hook failed: %q`, s.badHook),
	})
	for _, hook := range startupHooks(false) {
		if hook == s.badHook {
			step(c, ctx, waitHooks{"fail-" + hook})
			break
		}
		step(c, ctx, waitHooks{hook})
	}
	step(c, ctx, verifyCharm{})
}

type verifyDeployed struct{}

func (s verifyDeployed) step(c *gc.C, ctx *testContext) {
	c.Assert(ctx.deployer.staged, jc.DeepEquals, curl(0))
	c.Assert(ctx.deployer.deployed, jc.IsTrue)
}

type quickStart struct {
	minion bool
}

func (s quickStart) step(c *gc.C, ctx *testContext) {
	step(c, ctx, createCharm{})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{minion: s.minion})
	step(c, ctx, waitUnitAgent{status: status.Idle})
	step(c, ctx, waitHooks(startupHooks(s.minion)))
	step(c, ctx, verifyCharm{})
}

type quickStartRelation struct{}

func (s quickStartRelation) step(c *gc.C, ctx *testContext) {
	step(c, ctx, quickStart{})
	step(c, ctx, addRelation{})
	step(c, ctx, addRelationUnit{})
	step(c, ctx, waitHooks{"db-relation-joined mysql/0 db:0", "db-relation-changed mysql/0 db:0"})
	step(c, ctx, verifyRunning{})
}

type startupRelationError struct {
	badHook string
}

func (s startupRelationError) step(c *gc.C, ctx *testContext) {
	step(c, ctx, createCharm{badHooks: []string{s.badHook}})
	step(c, ctx, serveCharm{})
	step(c, ctx, createUniter{})
	step(c, ctx, waitUnitAgent{status: status.Idle})
	step(c, ctx, waitHooks(startupHooks(false)))
	step(c, ctx, verifyCharm{})
	step(c, ctx, addRelation{})
	step(c, ctx, addRelationUnit{})
}

type resolveError struct {
	resolved state.ResolvedMode
}

func (s resolveError) step(c *gc.C, ctx *testContext) {
	err := ctx.unit.SetResolved(s.resolved)
	c.Assert(err, jc.ErrorIsNil)
}

type statusfunc func() (status.StatusInfo, error)

var unitStatusGetter = func(ctx *testContext) statusfunc {
	return func() (status.StatusInfo, error) {
		return ctx.unit.Status()
	}
}

var agentStatusGetter = func(ctx *testContext) statusfunc {
	return func() (status.StatusInfo, error) {
		return ctx.unit.AgentStatus()
	}
}

type waitUnitAgent struct {
	statusGetter func(ctx *testContext) statusfunc
	status       status.Status
	info         string
	data         map[string]interface{}
	charm        int
	resolved     state.ResolvedMode
}

func (s waitUnitAgent) step(c *gc.C, ctx *testContext) {
	if s.statusGetter == nil {
		s.statusGetter = agentStatusGetter
	}
	timeout := time.After(worstCase)
	for {
		select {
		case <-time.After(coretesting.ShortWait):
			err := ctx.unit.Refresh()
			if err != nil {
				c.Fatalf("cannot refresh unit: %v", err)
			}
			resolved := ctx.unit.Resolved()
			if resolved != s.resolved {
				c.Logf("want resolved mode %q, got %q; still waiting", s.resolved, resolved)
				continue
			}
			url := ctx.unit.CharmURL()
			if url == nil {
				c.Logf("want unit charm %q, got nil; still waiting", curl(s.charm))
				continue
			}
			if *url != curl(s.charm) {
				c.Logf("want unit charm %q, got %q; still waiting", curl(s.charm), *url)
				continue
			}
			statusInfo, err := s.statusGetter(ctx)()
			c.Assert(err, jc.ErrorIsNil)
			if string(statusInfo.Status) != string(s.status) {
				c.Logf("want unit status %q, got %q; still waiting", s.status, statusInfo.Status)
				continue
			}
			if statusInfo.Message != s.info {
				c.Logf("want unit status info %q, got %q; still waiting", s.info, statusInfo.Message)
				continue
			}
			if s.data != nil {
				if len(statusInfo.Data) != len(s.data) {
					wantKeys := []string{}
					for k := range s.data {
						wantKeys = append(wantKeys, k)
					}
					sort.Strings(wantKeys)
					gotKeys := []string{}
					for k := range statusInfo.Data {
						gotKeys = append(gotKeys, k)
					}
					sort.Strings(gotKeys)
					c.Logf("want {%s} status data value(s), got {%s}; still waiting", strings.Join(wantKeys, ", "), strings.Join(gotKeys, ", "))
					continue
				}
				for key, value := range s.data {
					if statusInfo.Data[key] != value {
						c.Logf("want status data value %q for key %q, got %q; still waiting",
							value, key, statusInfo.Data[key])
						continue
					}
				}
			}
			return
		case <-timeout:
			c.Fatalf("never reached desired status")
		}
	}
}

type waitHooks []string

func (s waitHooks) step(c *gc.C, ctx *testContext) {
	if len(s) == 0 {
		// Give unwanted hooks a moment to run...
		time.Sleep(coretesting.ShortWait)
	}
	ctx.hooks = append(ctx.hooks, s...)
886 c.Logf("waiting for hooks: %#v", ctx.hooks) 887 match, cannotMatch, overshoot := ctx.matchHooks(c) 888 if overshoot && len(s) == 0 { 889 c.Fatalf("ran more hooks than expected") 890 } 891 if cannotMatch { 892 c.Fatalf("hooks did not match expected") 893 } 894 waitExecutionLockReleased := func() { 895 timeout := make(chan struct{}) 896 go func() { 897 <-time.After(worstCase) 898 close(timeout) 899 }() 900 releaser, err := processLock.Acquire(machinelock.Spec{ 901 Worker: "uniter-test", 902 Comment: "waitHooks", 903 Cancel: timeout, 904 }) 905 if err != nil { 906 c.Fatalf("failed to acquire execution lock: %v", err) 907 } 908 releaser() 909 } 910 if match { 911 if len(s) > 0 { 912 // only check for lock release if there were hooks 913 // run; hooks *not* running may be due to the lock 914 // being held. 915 waitExecutionLockReleased() 916 } 917 return 918 } 919 timeout := time.After(worstCase) 920 for { 921 select { 922 case <-time.After(coretesting.ShortWait): 923 if match, cannotMatch, _ = ctx.matchHooks(c); match { 924 waitExecutionLockReleased() 925 return 926 } else if cannotMatch { 927 c.Fatalf("unexpected hook triggered") 928 } 929 case <-timeout: 930 c.Fatalf("never got expected hooks") 931 } 932 } 933 } 934 935 type actionData struct { 936 actionName string 937 args []string 938 } 939 940 type waitActionInvocation struct { 941 expectedActions []actionData 942 } 943 944 func (s waitActionInvocation) step(c *gc.C, ctx *testContext) { 945 timeout := time.After(worstCase) 946 for { 947 select { 948 case <-time.After(coretesting.ShortWait): 949 ranActions := ctx.runner.ranActions() 950 if len(ranActions) != len(s.expectedActions) { 951 continue 952 } 953 assertActionsMatch(c, ranActions, s.expectedActions) 954 return 955 case <-timeout: 956 c.Fatalf("timed out waiting for action invocation") 957 } 958 } 959 } 960 961 func assertActionsMatch(c *gc.C, actualIn []actionData, expectIn []actionData) { 962 matches := 0 963 desiredMatches := len(actualIn) 964 c.Assert(len(actualIn), gc.Equals, len(expectIn)) 965 findMatch: 966 for _, expectedItem := range expectIn { 967 // find expectedItem in actualIn 968 for j, actualItem := range actualIn { 969 // If we find a match, remove both items from their 970 // respective slices, increment match count, and restart. 971 if reflect.DeepEqual(actualItem, expectedItem) { 972 actualIn = append(actualIn[:j], actualIn[j+1:]...) 973 matches++ 974 continue findMatch 975 } 976 } 977 // if we finish the whole thing without finding a match, we failed. 
		c.Assert(actualIn, jc.DeepEquals, expectIn)
	}
	c.Assert(matches, gc.Equals, desiredMatches)
}

type fixHook struct {
	name string
}

func (s fixHook) step(_ *gc.C, ctx *testContext) {
	if ctx.runner.hooksWithErrors != nil {
		ctx.runner.hooksWithErrors.Remove(s.name)
	}
}

type updateStatusHookTick struct{}

func (s updateStatusHookTick) step(c *gc.C, ctx *testContext) {
	err := ctx.updateStatusHookTicker.Tick()
	c.Assert(err, jc.ErrorIsNil)
}

type changeConfig map[string]interface{}

func (s changeConfig) step(c *gc.C, ctx *testContext) {
	err := ctx.application.UpdateCharmConfig(model.GenerationMaster, jujucharm.Settings(s))
	c.Assert(err, jc.ErrorIsNil)
}

type addAction struct {
	name   string
	params map[string]interface{}
}

func (s addAction) step(c *gc.C, ctx *testContext) {
	m, err := ctx.st.Model()
	c.Assert(err, jc.ErrorIsNil)
	operationID, err := m.EnqueueOperation("a test", 1)
	c.Assert(err, jc.ErrorIsNil)
	_, err = m.EnqueueAction(operationID, ctx.unit.Tag(), s.name, s.params, false, "", nil)
	c.Assert(err, jc.ErrorIsNil)
}

type upgradeCharm struct {
	revision int
	forced   bool
}

func (s upgradeCharm) step(c *gc.C, ctx *testContext) {
	curl := curl(s.revision)
	sch, err := ctx.st.Charm(curl)
	c.Assert(err, jc.ErrorIsNil)
	cfg := state.SetCharmConfig{
		Charm:       sch,
		ForceUnits:  s.forced,
		CharmOrigin: defaultCharmOrigin(s.revision),
	}
	// Make sure we upload the charm before changing it in the DB.
	serveCharm{}.step(c, ctx)
	err = ctx.application.SetCharm(cfg)
	c.Assert(err, jc.ErrorIsNil)
}

func defaultCharmOrigin(revision int) *state.CharmOrigin {
	// This functionality is highly dependent on the local
	// curl function. Any changes must be made in both locations.
	source := corecharm.CharmHub.String()
	channel := &state.Channel{
		Risk: "stable",
	}

	platform := &state.Platform{
		Architecture: corearch.DefaultArchitecture,
		OS:           "ubuntu",
		Channel:      "12.10",
	}

	return &state.CharmOrigin{
		Source:   source,
		Type:     "charm",
		Revision: intPtr(revision),
		Channel:  channel,
		Platform: platform,
	}
}

func intPtr(i int) *int {
	return &i
}

type verifyCharm struct {
	revision          int
	attemptedRevision int
	checkFiles        ft.Entries
}

func (s verifyCharm) step(c *gc.C, ctx *testContext) {
	s.checkFiles.Check(c, filepath.Join(ctx.path, "charm"))
	path := filepath.Join(ctx.path, "charm", "revision")
	content, err := os.ReadFile(path)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(string(content), gc.Equals, strconv.Itoa(s.revision))
	checkRevision := s.revision
	if s.attemptedRevision > checkRevision {
		checkRevision = s.attemptedRevision
	}
	err = ctx.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)

	url := ctx.unit.CharmURL()
	c.Assert(url, gc.NotNil)
	c.Assert(*url, gc.Equals, curl(checkRevision))
}

type pushResource struct{}

func (s pushResource) step(c *gc.C, ctx *testContext) {
	opened := resourcetesting.NewResource(c, &gt.Stub{}, "data", ctx.unit.ApplicationName(), "the bytes")

	res := ctx.st.Resources()
	_, err := res.SetResource(
		ctx.unit.ApplicationName(),
		opened.Username,
		opened.Resource.Resource,
		opened.ReadCloser,
		state.IncrementCharmModifiedVersion,
	)
	c.Assert(err, jc.ErrorIsNil)
}

type startUpgradeError struct{}

func (s startUpgradeError) step(c *gc.C, ctx *testContext) {
	steps := []stepper{
		createCharm{},
		serveCharm{},
		createUniter{},
		waitUnitAgent{
			status: status.Idle,
		},
		waitHooks(startupHooks(false)),
		verifyCharm{},

		createCharm{
			revision: 1,
			customize: func(c *gc.C, ctx *testContext, path string) {
				ctx.deployer.err = charm.ErrConflict
			},
		},
		serveCharm{},
		upgradeCharm{revision: 1},
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       status.Error,
			info:         "upgrade failed",
			charm:        1,
		},
		verifyWaiting{},
		verifyCharm{attemptedRevision: 1},
	}
	for _, s_ := range steps {
		step(c, ctx, s_)
	}
}

type verifyWaitingUpgradeError struct {
	revision int
}

func (s verifyWaitingUpgradeError) step(c *gc.C, ctx *testContext) {
	verifyCharmSteps := []stepper{
		waitUnitAgent{
			statusGetter: unitStatusGetter,
			status:       status.Error,
			info:         "upgrade failed",
			charm:        s.revision,
		},
		verifyCharm{attemptedRevision: s.revision},
	}
	verifyWaitingSteps := []stepper{
		stopUniter{},
		custom{func(c *gc.C, ctx *testContext) {
			// By setting status to Idle, and waiting for the restarted uniter
			// to reset the error status, we can avoid a race in which a subsequent
			// fixUpgradeError lands just before the restarting uniter retries the
			// upgrade; and thus puts us in an unexpected state for future steps.
			now := time.Now()
			sInfo := status.StatusInfo{
				Status:  status.Idle,
				Message: "",
				Since:   &now,
			}
			err := ctx.unit.SetAgentStatus(sInfo)
			c.Check(err, jc.ErrorIsNil)
		}},
		startUniter{rebootQuerier: &fakeRebootQuerier{rebootNotDetected}},
	}
	allSteps := append(verifyCharmSteps, verifyWaitingSteps...)
	allSteps = append(allSteps, verifyCharmSteps...)
	for _, s_ := range allSteps {
		step(c, ctx, s_)
	}
}

type fixUpgradeError struct{}

func (s fixUpgradeError) step(_ *gc.C, ctx *testContext) {
	ctx.deployer.err = nil
}

type addRelation struct {
	waitJoin bool
}

func (s addRelation) step(c *gc.C, ctx *testContext) {
	if ctx.relation != nil {
		panic("don't add two relations!")
	}
	if ctx.relatedApplication == nil {
		ctx.relatedApplication = ctx.s.AddTestingApplication(c, "mysql", ctx.s.AddTestingCharm(c, "mysql"))
	}
	eps, err := ctx.st.InferEndpoints(ctx.application.Name(), "mysql")
	c.Assert(err, jc.ErrorIsNil)
	ctx.relation, err = ctx.st.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits = map[string]*state.RelationUnit{}
	step(c, ctx, waitHooks{"db-relation-created mysql db:0"})
	if !s.waitJoin {
		return
	}

	// It's hard to do this properly (watching scope) without perturbing other tests.
	ru, err := ctx.relation.Unit(ctx.unit)
	c.Assert(err, jc.ErrorIsNil)
	timeout := time.After(worstCase)
	for {
		c.Logf("waiting to join relation")
		select {
		case <-timeout:
			c.Fatalf("failed to join relation")
		case <-time.After(coretesting.ShortWait):
			inScope, err := ru.InScope()
			c.Assert(err, jc.ErrorIsNil)
			if inScope {
				return
			}
		}
	}
}

type addRelationUnit struct{}

func (s addRelationUnit) step(c *gc.C, ctx *testContext) {
	u, err := ctx.relatedApplication.AddUnit(state.AddUnitParams{})
	c.Assert(err, jc.ErrorIsNil)
	ru, err := ctx.relation.Unit(u)
	c.Assert(err, jc.ErrorIsNil)
	err = ru.EnterScope(nil)
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits[u.Name()] = ru
}

type changeRelationUnit struct {
	name string
}

func (s changeRelationUnit) step(c *gc.C, ctx *testContext) {
	settings, err := ctx.relationUnits[s.name].Settings()
	c.Assert(err, jc.ErrorIsNil)
	key := "madness?"
	raw, _ := settings.Get(key)
	val, _ := raw.(string)
	if val == "" {
		val = "this is juju"
	} else {
		val += "u"
	}
	settings.Set(key, val)
	_, err = settings.Write()
	c.Assert(err, jc.ErrorIsNil)
}

type removeRelationUnit struct {
	name string
}

func (s removeRelationUnit) step(c *gc.C, ctx *testContext) {
	err := ctx.relationUnits[s.name].LeaveScope()
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits[s.name] = nil
}

type relationState struct {
	removed bool
	life    state.Life
}

func (s relationState) step(c *gc.C, ctx *testContext) {
	err := ctx.relation.Refresh()
	if s.removed {
		c.Assert(err, jc.Satisfies, errors.IsNotFound)
		return
	}
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctx.relation.Life(), gc.Equals, s.life)
}

type addSubordinateRelation struct {
	ifce string
}

func (s addSubordinateRelation) step(c *gc.C, ctx *testContext) {
	if _, err := ctx.st.Application("logging"); errors.IsNotFound(err) {
		ctx.s.AddTestingApplication(c, "logging", ctx.s.AddTestingCharm(c, "logging"))
	}
	eps, err := ctx.st.InferEndpoints("logging", "u:"+s.ifce)
	c.Assert(err, jc.ErrorIsNil)
	_, err = ctx.st.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
}

type removeSubordinateRelation struct {
	ifce string
}

func (s removeSubordinateRelation) step(c *gc.C, ctx *testContext) {
	eps, err := ctx.st.InferEndpoints("logging", "u:"+s.ifce)
	c.Assert(err, jc.ErrorIsNil)
	rel, err := ctx.st.EndpointsRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	err = rel.Destroy()
	c.Assert(err, jc.ErrorIsNil)
}

type waitSubordinateExists struct {
	name string
}

func (s waitSubordinateExists) step(c *gc.C, ctx *testContext) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("subordinate was not created")
		case <-time.After(coretesting.ShortWait):
			var err error
			ctx.subordinate, err = ctx.st.Unit(s.name)
			if errors.IsNotFound(err) {
				continue
			}
			c.Assert(err, jc.ErrorIsNil)
			return
		}
	}
}

type waitSubordinateDying struct{}

func (waitSubordinateDying) step(c *gc.C, ctx *testContext) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("subordinate was not made Dying")
		case <-time.After(coretesting.ShortWait):
			err := ctx.subordinate.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			if ctx.subordinate.Life() != state.Dying {
				continue
			}
		}
		break
	}
}

type removeSubordinate struct{}

func (removeSubordinate) step(c *gc.C, ctx *testContext) {
	err := ctx.subordinate.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	err = ctx.subordinate.Remove()
	c.Assert(err, jc.ErrorIsNil)
	ctx.subordinate = nil
}

type writeFile struct {
	path string
	mode os.FileMode
}

func (s writeFile) step(c *gc.C, ctx *testContext) {
	path := filepath.Join(ctx.path, s.path)
	dir := filepath.Dir(path)
	err := os.MkdirAll(dir, 0755)
	c.Assert(err, jc.ErrorIsNil)
	err = os.WriteFile(path, nil, s.mode)
	c.Assert(err, jc.ErrorIsNil)
}

type removeCharmDir struct{}

func (s removeCharmDir) step(c *gc.C, ctx *testContext) {
	path := filepath.Join(ctx.path, "charm")
	err := os.RemoveAll(path)
	c.Assert(err, jc.ErrorIsNil)
}

type custom struct {
	f func(*gc.C, *testContext)
}

func (s custom) step(c *gc.C, ctx *testContext) {
	s.f(c, ctx)
}

var relationDying = custom{func(c *gc.C, ctx *testContext) {
	c.Check(ctx.relation.Refresh(), gc.IsNil)
	c.Assert(ctx.relation.Destroy(), gc.IsNil)
}}

var unitDying = custom{func(c *gc.C, ctx *testContext) {
	c.Assert(ctx.unit.Destroy(), gc.IsNil)
}}

var unitDead = custom{func(c *gc.C, ctx *testContext) {
	c.Assert(ctx.unit.EnsureDead(), gc.IsNil)
}}

var subordinateDying = custom{func(c *gc.C, ctx *testContext) {
	c.Assert(ctx.subordinate.Destroy(), gc.IsNil)
}}

func curl(revision int) string {
	// This functionality is highly depended on by the local
	// defaultCharmOrigin function. Any changes must be made
	// in both locations.
	return jujucharm.MustParseURL("ch:quantal/wordpress").WithRevision(revision).String()
}

type hookLock struct {
	releaser func()
}

type hookStep struct {
	stepFunc func(*gc.C, *testContext)
}

func (h *hookStep) step(c *gc.C, ctx *testContext) {
	h.stepFunc(c, ctx)
}

func (h *hookLock) acquire() *hookStep {
	return &hookStep{stepFunc: func(c *gc.C, ctx *testContext) {
		releaser, err := processLock.Acquire(machinelock.Spec{
			Worker:  "uniter-test",
			Comment: "hookLock",
			Cancel:  make(chan struct{}), // clearly suboptimal
		})
		c.Assert(err, jc.ErrorIsNil)
		h.releaser = releaser
	}}
}

func (h *hookLock) release() *hookStep {
	return &hookStep{stepFunc: func(c *gc.C, ctx *testContext) {
		c.Assert(h.releaser, gc.NotNil)
		h.releaser()
		h.releaser = nil
	}}
}

type runCommands []string

func (cmds runCommands) step(c *gc.C, ctx *testContext) {
	commands := strings.Join(cmds, "\n")
	args := uniter.RunCommandsArgs{
		Commands:       commands,
		RelationId:     -1,
		RemoteUnitName: "",
		UnitName:       "u/0",
	}
	result, err := ctx.uniter.RunCommands(args)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(result.Code, gc.Equals, 0)
	c.Check(string(result.Stdout), gc.Equals, "test on workload")
	c.Check(string(result.Stderr), gc.Equals, "")
}

type forceMinion struct{}

func (forceMinion) step(c *gc.C, ctx *testContext) {
	ctx.leaderTracker.setLeader(c, false)
}

type forceLeader struct{}

func (forceLeader) step(c *gc.C, ctx *testContext) {
	ctx.leaderTracker.setLeader(c, true)
}

func newMockLeaderTracker(ctx *testContext) *mockLeaderTracker {
	return &mockLeaderTracker{
		ctx: ctx,
	}
}

type mockLeaderTracker struct {
	mu       sync.Mutex
	ctx      *testContext
	isLeader bool
	waiting  []chan struct{}
}

func (mock *mockLeaderTracker) Kill() {
	return
}

func (mock *mockLeaderTracker) Wait() error {
	return nil
}

func (mock *mockLeaderTracker) ApplicationName() string {
	return mock.ctx.application.Name()
}

func (mock *mockLeaderTracker) ClaimDuration() time.Duration {
	return 30 * time.Second
}

func (mock *mockLeaderTracker) ClaimLeader() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader {
		return fastTicket{true}
	}
	return fastTicket{}
}

func (mock *mockLeaderTracker) WaitLeader() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader {
		return fastTicket{}
	}
	return mock.waitTicket()
}

func (mock *mockLeaderTracker) WaitMinion() leadership.Ticket {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if !mock.isLeader {
		return fastTicket{}
	}
	return mock.waitTicket()
}

func (mock *mockLeaderTracker) waitTicket() leadership.Ticket {
	// very internal, expects mu to be locked already
	ch := make(chan struct{})
	mock.waiting = append(mock.waiting, ch)
	return waitTicket{ch}
}

func (mock *mockLeaderTracker) setLeader(c *gc.C, isLeader bool) {
	mock.mu.Lock()
	defer mock.mu.Unlock()
	if mock.isLeader == isLeader {
		return
	}
	if isLeader {
		claimer, err := mock.ctx.leaseManager.Claimer("application-leadership", mock.ctx.st.ModelUUID())
		c.Assert(err, jc.ErrorIsNil)
		err = claimer.Claim(
			mock.ctx.application.Name(), mock.ctx.unit.Name(), time.Minute,
		)
		c.Assert(err, jc.ErrorIsNil)
	} else {
		time.Sleep(coretesting.ShortWait)
	}
	mock.isLeader = isLeader
	for _, ch := range mock.waiting {
		close(ch)
	}
	mock.waiting = nil
}

type waitTicket struct {
	ch chan struct{}
}

func (t waitTicket) Ready() <-chan struct{} {
	return t.ch
}

func (t waitTicket) Wait() bool {
	return false
}

type fastTicket struct {
	value bool
}

func (fastTicket) Ready() <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}

func (t fastTicket) Wait() bool {
	return t.value
}

type setLeaderSettings map[string]string

func (s setLeaderSettings) step(c *gc.C, ctx *testContext) {
	// We do this directly on State, not the API, so we don't have to worry
	// about getting an API conn for whatever unit's meant to be leader.
	err := ctx.application.UpdateLeaderSettings(successToken{}, s)
	c.Assert(err, jc.ErrorIsNil)
}

type successToken struct{}

func (successToken) Check() error {
	return nil
}

type mockCharmDirGuard struct{}

// Unlock implements fortress.Guard.
func (*mockCharmDirGuard) Unlock() error { return nil }

// Lockdown implements fortress.Guard.
func (*mockCharmDirGuard) Lockdown(_ fortress.Abort) error { return nil }

type provisionStorage struct{}

func (s provisionStorage) step(c *gc.C, ctx *testContext) {
	sb, err := state.NewStorageBackend(ctx.st)
	c.Assert(err, jc.ErrorIsNil)
	storageAttachments, err := sb.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 1)

	filesystem, err := sb.StorageInstanceFilesystem(storageAttachments[0].StorageInstance())
	c.Assert(err, jc.ErrorIsNil)

	filesystemInfo := state.FilesystemInfo{
		Size:         1024,
		FilesystemId: "fs-id",
	}
	err = sb.SetFilesystemInfo(filesystem.FilesystemTag(), filesystemInfo)
	c.Assert(err, jc.ErrorIsNil)

	machineId, err := ctx.unit.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)

	filesystemAttachmentInfo := state.FilesystemAttachmentInfo{
		MountPoint: "/srv/wordpress/content",
	}
	err = sb.SetFilesystemAttachmentInfo(
		names.NewMachineTag(machineId),
		filesystem.FilesystemTag(),
		filesystemAttachmentInfo,
	)
	c.Assert(err, jc.ErrorIsNil)
}

type destroyStorageAttachment struct{}

func (s destroyStorageAttachment) step(c *gc.C, ctx *testContext) {
	sb, err := state.NewStorageBackend(ctx.st)
	c.Assert(err, jc.ErrorIsNil)
	storageAttachments, err := sb.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 1)
	err = sb.DetachStorage(
		storageAttachments[0].StorageInstance(),
		ctx.unit.UnitTag(),
		false,
		time.Duration(0),
	)
	c.Assert(err, jc.ErrorIsNil)
}

type verifyStorageDetached struct{}

func (s verifyStorageDetached) step(c *gc.C, ctx *testContext) {
	sb, err := state.NewStorageBackend(ctx.st)
	c.Assert(err, jc.ErrorIsNil)
	storageAttachments, err := sb.UnitStorageAttachments(ctx.unit.UnitTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storageAttachments, gc.HasLen, 0)
}

func ptr[T any](v T) *T {
	return &v
}

type createSecret struct {
	applicationName string
}

func (s createSecret) step(c *gc.C, ctx *testContext) {
	if s.applicationName == "" {
		s.applicationName = "u"
	}

	uri := secrets.NewURI()
	store := state.NewSecrets(ctx.st)
	_, err := store.CreateSecret(uri, state.CreateSecretParams{
		UpdateSecretParams: state.UpdateSecretParams{
			LeaderToken:    &fakeToken{},
			RotatePolicy:   ptr(secrets.RotateDaily),
			NextRotateTime: ptr(time.Now().Add(time.Hour)),
			Data:           map[string]string{"foo": "bar"},
		},
		Owner: names.NewApplicationTag(s.applicationName),
	})
	c.Assert(err, jc.ErrorIsNil)
	appTag := names.NewApplicationTag(s.applicationName)
	err = ctx.st.GrantSecretAccess(uri, state.SecretAccessParams{
		LeaderToken: &fakeToken{},
		Scope:       appTag,
		Subject:     appTag,
		Role:        secrets.RoleManage,
	})
	c.Assert(err, jc.ErrorIsNil)
	ctx.createdSecretURI = uri
}

type fakeToken struct{}

func (t *fakeToken) Check() error {
	return nil
}

type changeSecret struct{}

func (s changeSecret) step(c *gc.C, ctx *testContext) {
	store := state.NewSecrets(ctx.st)
	_, err := store.UpdateSecret(ctx.createdSecretURI, state.UpdateSecretParams{
		LeaderToken: &fakeToken{},
		Data:        map[string]string{"foo": "bar2"},
	})
	c.Assert(err, jc.ErrorIsNil)
}

type getSecret struct{}

func (s getSecret) step(c *gc.C, ctx *testContext) {
	val, err := ctx.secretBackends.GetContent(ctx.createdSecretURI, "foorbar", false, false)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(val.EncodedValues(), jc.DeepEquals, map[string]string{"foo": "bar"})
}

type rotateSecret struct{}

func (s rotateSecret) step(c *gc.C, ctx *testContext) {
	select {
	case ctx.secretsRotateCh <- []string{ctx.createdSecretURI.String()}:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out sending rotate secret change for %q", ctx.createdSecretURI)
	}
}

type expireSecret struct{}

func (s expireSecret) step(c *gc.C, ctx *testContext) {
	select {
	case ctx.secretsExpireCh <- []string{ctx.createdSecretURI.String() + "/1"}:
	case <-time.After(coretesting.LongWait):
		c.Fatalf(`timed out sending expire secret change for "%s/1"`, ctx.createdSecretURI)
	}
}

type expectError struct {
	err string
}

func (s expectError) step(_ *gc.C, ctx *testContext) {
	ctx.setExpectedError(s.err)
}

// manualTicker will be used to generate collect-metrics events
// in a time-independent manner for testing.
type manualTicker struct {
	c chan time.Time
}

// Tick sends a signal on the ticker channel.
func (t *manualTicker) Tick() error {
	select {
	case t.c <- time.Now():
	case <-time.After(worstCase):
		return fmt.Errorf("ticker channel blocked")
	}
	return nil
}

type dummyWaiter struct {
	c chan time.Time
}

func (w dummyWaiter) After() <-chan time.Time {
	return w.c
}

// ReturnTimer can be used to replace the update status signal generator.
func (t *manualTicker) ReturnTimer() remotestate.UpdateStatusTimerFunc {
	return func(_ time.Duration) remotestate.Waiter {
		return dummyWaiter{t.c}
	}
}

func newManualTicker() *manualTicker {
	return &manualTicker{
		c: make(chan time.Time),
	}
}

// Instead of having a machine-level lock that we have real contention with,
// we fake it with a process-level lock, which blocks callers within the same
// process. This is needed because the startUniter step above hands this lock
// to the uniter as its machine lock. We create it once at process
// initialisation time and reuse it whenever it is asked for.
var processLock machinelock.Lock

func init() {
	processLock = &fakemachinelock{}
}

type fakemachinelock struct {
	machinelock.Lock
	mu sync.Mutex
}

func (f *fakemachinelock) Acquire(_ machinelock.Spec) (func(), error) {
	f.mu.Lock()
	return func() {
		f.mu.Unlock()
	}, nil
}

type activateTestContainer struct {
	containerName string
}

func (s activateTestContainer) step(c *gc.C, ctx *testContext) {
	ctx.pebbleClients[s.containerName].TriggerStart()
}

type injectTestContainer struct {
	containerName string
}

func (s injectTestContainer) step(c *gc.C, ctx *testContext) {
	c.Assert(ctx.uniter, gc.IsNil)
	ctx.containerNames = append(ctx.containerNames, s.containerName)
	if ctx.pebbleClients == nil {
		ctx.pebbleClients = make(map[string]*fakePebbleClient)
	}
	ctx.pebbleClients[s.containerName] = &fakePebbleClient{
		err:   errors.BadRequestf("not ready yet"),
		clock: testclock.NewClock(time.Time{}),
	}
}

type triggerShutdown struct{}

func (t triggerShutdown) step(c *gc.C, ctx *testContext) {
	err := ctx.uniter.Terminate()
	c.Assert(err, jc.ErrorIsNil)
}
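// The value below is an illustrative sketch added for documentation purposes
// and is not referenced by the suite. It shows how the step helpers in this
// file compose into a scripted uniterTest; the surrounding test files in this
// package are assumed to build tables of such values and drive their steps
// through testContext.run.
var _ = ut(
	"example: start a uniter and stop it cleanly",
	quickStart{},
	stopUniter{},
)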